ricebug committed on
Commit
84a7631
·
verified ·
1 Parent(s): 73396fa

Upload 4 files

Browse files
Files changed (4) hide show
  1. Dockerfile +16 -0
  2. LICENSE +21 -0
  3. main.py +432 -0
  4. pyproject.toml +32 -0
Dockerfile ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim

WORKDIR /app

# Install runtime dependencies only.
# FIX: `uv sync` installs the dev dependency group (ruff) by default;
# --no-dev keeps the production image lean.
COPY pyproject.toml .
RUN uv sync --no-dev

# Copy application code
COPY main.py .

# Expose the port the app runs on
EXPOSE 8000

# Command to run the application (--no-dev so `uv run` does not re-sync dev deps)
CMD ["uv", "run", "--no-dev", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2025 RrOrange
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
main.py ADDED
@@ -0,0 +1,432 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import base64
3
+ import json
4
+ import logging
5
+ import os
6
+ import re
7
+ import tempfile
8
+ import time
9
+ import uuid
10
+ from datetime import datetime, timezone
11
+ from typing import Dict, List, Optional, Union
12
+
13
+ from fastapi import Depends, FastAPI, Header, HTTPException, Request
14
+ from fastapi.middleware.cors import CORSMiddleware
15
+ from fastapi.responses import JSONResponse, StreamingResponse
16
+ from gemini_webapi import GeminiClient, set_log_level
17
+ from gemini_webapi.constants import Model
18
+ from pydantic import BaseModel
19
+
20
# Configure logging for this module and for the gemini_webapi library
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
set_log_level("INFO")

app = FastAPI(title="Gemini API FastAPI Server")

# Add CORS middleware (wide open: any origin, method and header are allowed)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Global client, lazily initialized on first use (see get_gemini_client)
gemini_client = None

# Authentication credentials: Gemini web cookies plus this server's own API key
SECURE_1PSID = os.environ.get("SECURE_1PSID", "")
SECURE_1PSIDTS = os.environ.get("SECURE_1PSIDTS", "")
API_KEY = os.environ.get("API_KEY", "")
# When "true", model "thoughts" are prepended to replies wrapped in <think> tags
ENABLE_THINKING = os.environ.get("ENABLE_THINKING", "false").lower() == "true"

# Print debug info at startup so misconfiguration is visible immediately
if not SECURE_1PSID or not SECURE_1PSIDTS:
    logger.warning("⚠️ Gemini API credentials are not set or empty! Please check your environment variables.")
    logger.warning("Make sure SECURE_1PSID and SECURE_1PSIDTS are correctly set in your .env file or environment.")
    logger.warning("If using Docker, ensure the .env file is correctly mounted and formatted.")
    logger.warning("Example format in .env file (no quotes):")
    logger.warning("SECURE_1PSID=your_secure_1psid_value_here")
    logger.warning("SECURE_1PSIDTS=your_secure_1psidts_value_here")
else:
    # Only log the first few characters for security
    logger.info(f"Credentials found. SECURE_1PSID starts with: {SECURE_1PSID[:5]}...")
    logger.info(f"Credentials found. SECURE_1PSIDTS starts with: {SECURE_1PSIDTS[:5]}...")

if not API_KEY:
    logger.warning("⚠️ API_KEY is not set or empty! API authentication will not work.")
    logger.warning("Make sure API_KEY is correctly set in your .env file or environment.")
else:
    logger.info(f"API_KEY found. API_KEY starts with: {API_KEY[:5]}...")
64
+
65
def correct_markdown(md_text: str) -> str:
    """Fix Markdown emitted by Gemini.

    Removes Google-search link wrappers ([`text`](https://www.google.com/search?q=...))
    by re-pointing the link at a target derived from the display text, and
    unwraps markdown links that were wrapped in backticks.
    """

    def simplify_link_target(text_content: str) -> str:
        # "path/file.py:123 trailing stuff" -> keep only the "file:line" core.
        match_colon_num = re.match(r"([^:]+:\d+)", text_content)
        if match_colon_num:
            return match_colon_num.group(1)
        return text_content

    def replacer(match: re.Match) -> str:
        outer_open_paren = match.group(1)  # optional "(" preceding the link
        display_text = match.group(2)

        new_target_url = simplify_link_target(display_text)
        new_link_segment = f"[`{display_text}`]({new_target_url})"

        # Preserve a surrounding "(...)" pair when the link was parenthesized.
        if outer_open_paren:
            return f"{outer_open_paren}{new_link_segment})"
        else:
            return new_link_segment

    # FIX: escape the dots in the host name; an unescaped "." matched any
    # character, so e.g. "wwwXgoogleXcom" URLs would also have been rewritten.
    pattern = r"(\()?\[`([^`]+?)`\]\((https://www\.google\.com/search\?q=)(.*?)(?<!\\)\)\)*(\))?"

    fixed_google_links = re.sub(pattern, replacer, md_text)
    # Unwrap backtick-wrapped markdown links: `[text](url)` -> [text](url)
    pattern = r"`(\[[^\]]+\]\([^\)]+\))`"
    return re.sub(pattern, r"\1", fixed_google_links)
94
+
95
+
96
+ # Pydantic models for API requests and responses
97
class ContentItem(BaseModel):
    """One element of a multi-part (OpenAI-style) message content list."""

    type: str  # "text" or "image_url"
    text: Optional[str] = None  # present when type == "text"
    image_url: Optional[Dict[str, str]] = None  # {"url": ...}; may be a data: URI
101
+
102
+
103
class Message(BaseModel):
    """A single chat message; content is plain text or a list of parts."""

    role: str  # "system", "user" or "assistant"
    content: Union[str, List[ContentItem]]
    name: Optional[str] = None  # optional participant name (OpenAI API field)
107
+
108
+
109
class ChatCompletionRequest(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI-compatible).

    NOTE(review): the sampling fields (temperature, top_p, n, max_tokens,
    penalties, user) are accepted for API compatibility but the handler only
    reads model, messages and stream — they are not forwarded to Gemini.
    """

    model: str
    messages: List[Message]
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 1.0
    n: Optional[int] = 1
    stream: Optional[bool] = False  # when True, the endpoint answers with SSE chunks
    max_tokens: Optional[int] = None
    presence_penalty: Optional[float] = 0
    frequency_penalty: Optional[float] = 0
    user: Optional[str] = None
120
+
121
+
122
class Choice(BaseModel):
    """One completion choice within a chat.completion response."""

    index: int
    message: Message
    finish_reason: str  # e.g. "stop"
126
+
127
+
128
class Usage(BaseModel):
    """Token accounting for a completion.

    NOTE(review): this server fills these with whitespace word counts,
    not real tokenizer counts.
    """

    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
132
+
133
+
134
class ChatCompletionResponse(BaseModel):
    """OpenAI-compatible chat.completion response envelope."""

    id: str  # "chatcmpl-<uuid>"
    object: str = "chat.completion"
    created: int  # unix timestamp
    model: str
    choices: List[Choice]
    usage: Usage
141
+
142
+
143
class ModelData(BaseModel):
    """One entry in the GET /v1/models listing."""

    id: str
    object: str = "model"
    created: int  # unix timestamp
    owned_by: str = "google"
148
+
149
+
150
class ModelList(BaseModel):
    """Envelope for the GET /v1/models response."""

    object: str = "list"
    data: List[ModelData]
153
+
154
+
155
+ # Authentication dependency
156
async def verify_api_key(authorization: str = Header(None)):
    """FastAPI dependency validating the Bearer token in the Authorization header.

    Skips validation entirely when no API_KEY is configured (development mode).
    Raises HTTPException(401) for a missing header, a malformed header, a
    non-Bearer scheme, or a wrong key; returns the token on success.
    """
    if not API_KEY:
        # No key configured in the environment: allow every request through.
        logger.warning("API key validation skipped - no API_KEY set in environment")
        return

    if not authorization:
        raise HTTPException(status_code=401, detail="Missing Authorization header")

    # The header must be exactly two whitespace-separated parts: scheme + token.
    parts = authorization.split()
    if len(parts) != 2:
        raise HTTPException(status_code=401, detail="Invalid authorization format. Use 'Bearer YOUR_API_KEY'")

    scheme, token = parts
    if scheme.lower() != "bearer":
        raise HTTPException(status_code=401, detail="Invalid authentication scheme. Use Bearer token")
    if token != API_KEY:
        raise HTTPException(status_code=401, detail="Invalid API key")

    return token
176
+
177
+
178
+ # Simple error handler middleware
179
# Simple error handler middleware
@app.middleware("http")
async def error_handling(request: Request, call_next):
    """Catch-all middleware: turn any unhandled exception into a JSON 500."""
    try:
        return await call_next(request)
    except Exception as e:
        logger.error(f"Request failed: {str(e)}")
        return JSONResponse(status_code=500, content={"error": {"message": str(e), "type": "internal_server_error"}})
186
+
187
+
188
+ # Get list of available models
189
@app.get("/v1/models")
async def list_models():
    """Return the model list declared by gemini_webapi, in OpenAI /v1/models shape."""
    now = int(datetime.now(tz=timezone.utc).timestamp())
    data = [
        {
            "id": m.model_name,  # e.g. "gemini-2.0-flash"
            "object": "model",
            "created": now,
            "owned_by": "google-gemini-web",
        }
        for m in Model
    ]
    # FIX: replaced stray debug print(data) with proper (lazy) debug logging.
    logger.debug("Available models payload: %s", data)
    return {"object": "list", "data": data}
204
+
205
+
206
+ # Helper to convert between Gemini and OpenAI model names
207
def map_model_name(openai_model_name: str) -> Model:
    """Resolve an OpenAI-style model name string to a gemini_webapi Model member.

    Resolution order: substring match against the requested name, then a
    keyword-based fallback table, then the first enum member as a last resort.
    """

    def display(member: Model) -> str:
        # Some enum members expose model_name; fall back to str() otherwise.
        return member.model_name if hasattr(member, "model_name") else str(member)

    # Log every model the library exposes, to help debug lookup failures.
    all_models = [m.model_name if hasattr(m, "model_name") else str(m) for m in Model]
    logger.info(f"Available models: {all_models}")

    # Pass 1: direct (case-insensitive) substring match.
    wanted = openai_model_name.lower()
    for member in Model:
        if wanted in display(member).lower():
            return member

    # Pass 2: keyword table for well-known OpenAI-ish aliases.
    model_keywords = {
        "gemini-pro": ["pro", "2.0"],
        "gemini-pro-vision": ["vision", "pro"],
        "gemini-flash": ["flash", "2.0"],
        "gemini-1.5-pro": ["1.5", "pro"],
        "gemini-1.5-flash": ["1.5", "flash"],
    }
    keywords = model_keywords.get(openai_model_name, ["pro"])  # default to a "pro" model

    for member in Model:
        name = display(member).lower()
        if all(kw.lower() in name for kw in keywords):
            return member

    # Last resort: the first declared model.
    return next(iter(Model))
238
+
239
+
240
+ # Prepare conversation history from OpenAI messages format
241
def prepare_conversation(messages: List[Message]) -> tuple:
    """Flatten OpenAI-style chat messages into one prompt string plus temp image files.

    Returns (conversation, temp_files) where temp_files are paths of decoded
    base64 images written to disk for upload; the caller is responsible for
    deleting them.
    """
    prefixes = {"system": "System: ", "user": "Human: ", "assistant": "Assistant: "}
    parts: List[str] = []
    temp_files: List[str] = []

    for msg in messages:
        if isinstance(msg.content, str):
            # Plain-string content: only the three known roles are emitted;
            # messages with any other role are dropped silently.
            if msg.role in prefixes:
                parts.append(f"{prefixes[msg.role]}{msg.content}\n\n")
            continue

        # Multi-part content: unknown roles get no prefix, but their items are kept.
        parts.append(prefixes.get(msg.role, ""))

        for item in msg.content:
            if item.type == "text":
                parts.append(item.text or "")
            elif item.type == "image_url" and item.image_url:
                url = item.image_url.get("url", "")
                # Only data: URIs are supported; remote URLs are ignored silently.
                if url.startswith("data:image/"):
                    try:
                        # Decode the base64 payload after the comma.
                        payload = base64.b64decode(url.split(",")[1])
                        # Persist to a temp file so the Gemini client can upload it.
                        # NOTE(review): suffix is always ".png" regardless of the
                        # declared image type — confirm the client ignores it.
                        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
                            tmp.write(payload)
                            temp_files.append(tmp.name)
                    except Exception as e:
                        logger.error(f"Error processing base64 image: {str(e)}")

        parts.append("\n\n")

    # Trailing cue for the model to answer as the assistant.
    parts.append("Assistant: ")

    return "".join(parts), temp_files
289
+
290
+
291
+ # Dependency to get the initialized Gemini client
292
async def get_gemini_client():
    """FastAPI dependency returning the lazily-initialized global GeminiClient.

    Raises HTTPException(500) when the client cannot be initialized
    (e.g. invalid or expired cookies).
    """
    global gemini_client
    if gemini_client is None:
        try:
            gemini_client = GeminiClient(SECURE_1PSID, SECURE_1PSIDTS)
            # Generous timeout: initial login can be slow.
            await gemini_client.init(timeout=300)
        except Exception as e:
            logger.error(f"Failed to initialize Gemini client: {str(e)}")
            raise HTTPException(status_code=500, detail=f"Failed to initialize Gemini client: {str(e)}")
    return gemini_client
302
+
303
+
304
@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest, api_key: str = Depends(verify_api_key)):
    """OpenAI-compatible chat completions endpoint backed by the Gemini web client.

    Supports both non-streaming responses and simulated streaming (the full
    reply is generated first, then re-emitted as SSE chunks per character).
    Raises HTTPException(500) on any failure.
    """
    try:
        # Make sure the shared client is initialized (lazy, first request only).
        global gemini_client
        if gemini_client is None:
            gemini_client = GeminiClient(SECURE_1PSID, SECURE_1PSIDTS)
            await gemini_client.init(timeout=300)
            logger.info("Gemini client initialized successfully")

        # Convert the OpenAI message list into one prompt plus temp image files.
        conversation, temp_files = prepare_conversation(request.messages)
        logger.info(f"Prepared conversation: {conversation}")
        logger.info(f"Temp files: {temp_files}")

        # Pick the Gemini model best matching the requested name.
        model = map_model_name(request.model)
        logger.info(f"Using model: {model}")

        # Generate the response.
        logger.info("Sending request to Gemini...")
        try:
            if temp_files:
                # With files
                response = await gemini_client.generate_content(conversation, files=temp_files, model=model)
            else:
                # Text only
                response = await gemini_client.generate_content(conversation, model=model)
        finally:
            # FIX: clean up temp image files even when generate_content raises;
            # previously a failed request leaked the files on disk.
            for temp_file in temp_files:
                try:
                    os.unlink(temp_file)
                except Exception as e:
                    logger.warning(f"Failed to delete temp file {temp_file}: {str(e)}")

        # Extract the text reply, optionally prefixed with the model's thoughts.
        reply_text = ""
        if ENABLE_THINKING and hasattr(response, "thoughts"):
            reply_text += f"<think>{response.thoughts}</think>"
        if hasattr(response, "text"):
            reply_text += response.text
        else:
            reply_text += str(response)
        # Undo escaping the web client applies to markdown-sensitive characters.
        reply_text = reply_text.replace("&lt;", "<").replace("\\<", "<").replace("\\_", "_").replace("\\>", ">")
        reply_text = correct_markdown(reply_text)

        logger.info(f"Response: {reply_text}")

        if not reply_text or reply_text.strip() == "":
            logger.warning("Empty response received from Gemini")
            reply_text = "服务器返回了空响应。请检查 Gemini API 凭据是否有效。"

        # Build the response envelope.
        completion_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if request.stream:
            # Streaming: emit OpenAI-style SSE chunks.
            async def generate_stream():
                # Opening chunk announcing the assistant role.
                data = {
                    "id": completion_id,
                    "object": "chat.completion.chunk",
                    "created": created_time,
                    "model": request.model,
                    "choices": [{"index": 0, "delta": {"role": "assistant"}, "finish_reason": None}],
                }
                yield f"data: {json.dumps(data)}\n\n"

                # Simulated streaming: send the reply one character at a time.
                for char in reply_text:
                    data = {
                        "id": completion_id,
                        "object": "chat.completion.chunk",
                        "created": created_time,
                        "model": request.model,
                        "choices": [{"index": 0, "delta": {"content": char}, "finish_reason": None}],
                    }
                    yield f"data: {json.dumps(data)}\n\n"
                    # Small delay so clients perceive a realistic stream.
                    await asyncio.sleep(0.01)

                # Closing chunk, then the [DONE] sentinel.
                data = {
                    "id": completion_id,
                    "object": "chat.completion.chunk",
                    "created": created_time,
                    "model": request.model,
                    "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
                }
                yield f"data: {json.dumps(data)}\n\n"
                yield "data: [DONE]\n\n"

            return StreamingResponse(generate_stream(), media_type="text/event-stream")
        else:
            # Non-streaming: a single chat.completion object.
            # NOTE(review): "token" counts are whitespace word counts, not real tokens.
            result = {
                "id": completion_id,
                "object": "chat.completion",
                "created": created_time,
                "model": request.model,
                "choices": [{"index": 0, "message": {"role": "assistant", "content": reply_text}, "finish_reason": "stop"}],
                "usage": {
                    "prompt_tokens": len(conversation.split()),
                    "completion_tokens": len(reply_text.split()),
                    "total_tokens": len(conversation.split()) + len(reply_text.split()),
                },
            }

            logger.info(f"Returning response: {result}")
            return result

    except Exception as e:
        logger.error(f"Error generating completion: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Error generating completion: {str(e)}")
422
+
423
+
424
@app.get("/")
async def root():
    """Health-check endpoint confirming the server is up."""
    payload = {"status": "online", "message": "Gemini API FastAPI Server is running"}
    return payload
427
+
428
+
429
if __name__ == "__main__":
    import uvicorn

    # Run the development server directly; in Docker the CMD invokes uvicorn itself.
    uvicorn.run("main:app", host="0.0.0.0", port=8000, log_level="info")
pyproject.toml ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
[project]
name = "gemi2api-server"
version = "0.1.3"
license = "MIT"
# FIX: replaced the scaffold placeholder "Add your description here".
description = "OpenAI-compatible FastAPI server wrapping the Gemini web API"
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
    "browser-cookie3>=0.20.1",
    "fastapi>=0.115.12",
    "gemini-webapi>=1.17.3",
    "uvicorn[standard]>=0.34.1",
]

# Use the TUNA PyPI mirror by default (faster inside mainland China).
[[tool.uv.index]]
url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple"
default = true

[dependency-groups]
dev = [
    "ruff>=0.11.7",
]

[tool.ruff]
line-length = 150  # maximum line length

[tool.ruff.lint]
select = ["E", "F", "W", "I"]  # E: pycodestyle, F: pyflakes, W: pycodestyle warnings, I: isort
ignore = ["E501", "W191"]  # ignore line-length and tab-indentation warnings

[tool.ruff.format]
quote-style = "double"  # use double quotes
indent-style = "tab"  # indent with tabs