qa1145 committed on
Commit
f0fcefb
·
verified ·
1 Parent(s): fddfb1e

Upload 9 files

Browse files
Files changed (1) hide show
  1. app.py +2 -51
app.py CHANGED
@@ -66,31 +66,6 @@ async def list_models():
66
  }
67
 
68
 
69
- def parse_openrouter_response(response_data: dict) -> str:
70
- """从OpenRouter响应中提取内容"""
71
- content = ""
72
-
73
- # 标准OpenAI格式
74
- if "choices" in response_data and response_data["choices"]:
75
- choices = response_data["choices"]
76
- if choices:
77
- msg = choices[0].get("message", {})
78
- content = msg.get("content", "")
79
- if not content:
80
- # 可能是delta格式
81
- delta = choices[0].get("delta", {})
82
- content = delta.get("content", "")
83
-
84
- # 直接返回的情况
85
- if not content and "message" in response_data:
86
- content = response_data.get("message", {}).get("content", "")
87
-
88
- if not content and "content" in response_data:
89
- content = response_data.get("content", "")
90
-
91
- return content
92
-
93
-
94
  @fastapi_app.post("/v1/chat/completions")
95
  async def chat_completions(request: ChatCompletionRequest):
96
  prompt = request.messages[-1].content if request.messages else ""
@@ -107,32 +82,8 @@ async def chat_completions(request: ChatCompletionRequest):
107
  if not result.get("success"):
108
  raise HTTPException(status_code=400, detail=result.get("error", "Request failed"))
109
 
110
- response_data = result.get("response", {})
111
- print(f"[chat_completions] response_data: {response_data}")
112
- content = parse_openrouter_response(response_data)
113
- print(f"[chat_completions] parsed content: '{content}'")
114
-
115
- return {
116
- "id": response_data.get("id", f"chatcmpl-{random.randint(100000, 999999)}"),
117
- "object": "chat.completion",
118
- "created": int(datetime.now().timestamp()),
119
- "model": result.get("model", request.model or "unknown"),
120
- "choices": [
121
- {
122
- "index": 0,
123
- "message": {
124
- "role": "assistant",
125
- "content": content
126
- },
127
- "finish_reason": "stop"
128
- }
129
- ],
130
- "usage": {
131
- "prompt_tokens": len(prompt),
132
- "completion_tokens": 10,
133
- "total_tokens": len(prompt) + 10
134
- }
135
- }
136
 
137
 
138
  async def stream_chat(model_hint: Optional[str], messages: list):
 
66
  }
67
 
68
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  @fastapi_app.post("/v1/chat/completions")
70
  async def chat_completions(request: ChatCompletionRequest):
71
  prompt = request.messages[-1].content if request.messages else ""
 
82
  if not result.get("success"):
83
  raise HTTPException(status_code=400, detail=result.get("error", "Request failed"))
84
 
85
+ # 直接返回原始响应
86
+ return result.get("response", {})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
 
88
 
89
  async def stream_chat(model_hint: Optional[str], messages: list):