Spaces:
Runtime error
Runtime error
feat: implement error handling and status messaging for streaming chat completions
Browse files- app.py +14 -7
- index.html +11 -4
app.py
CHANGED
|
@@ -24,6 +24,8 @@ def chat(messages: list, system_prompt: str = "") -> str:
|
|
| 24 |
for msg in messages:
|
| 25 |
formatted.append({"role": msg["role"], "content": msg["content"]})
|
| 26 |
completion = client.chat.completions.create(model=MODEL, messages=formatted)
|
|
|
|
|
|
|
| 27 |
return completion.choices[0].message.content
|
| 28 |
|
| 29 |
|
|
@@ -39,13 +41,18 @@ async def stream_chat(request: Request):
|
|
| 39 |
formatted.append({"role": msg["role"], "content": msg["content"]})
|
| 40 |
|
| 41 |
def event_stream():
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
yield "data: [DONE]\n\n"
|
| 50 |
|
| 51 |
return StreamingResponse(event_stream(), media_type="text/event-stream")
|
|
|
|
| 24 |
for msg in messages:
|
| 25 |
formatted.append({"role": msg["role"], "content": msg["content"]})
|
| 26 |
completion = client.chat.completions.create(model=MODEL, messages=formatted)
|
| 27 |
+
if not completion.choices:
|
| 28 |
+
return "Error: No response from model."
|
| 29 |
return completion.choices[0].message.content
|
| 30 |
|
| 31 |
|
|
|
|
| 41 |
formatted.append({"role": msg["role"], "content": msg["content"]})
|
| 42 |
|
| 43 |
def event_stream():
    """Yield SSE-formatted events for a streamed chat completion.

    Emits one `data: {"token": ...}` event per content delta, a
    `data: {"error": ...}` event if the upstream call fails, and always
    terminates with `data: [DONE]`.

    NOTE(review): relies on enclosing-scope names `client`, `MODEL`,
    and `formatted` — assumed to be the OpenAI-compatible client, the
    model id, and the prepared message list; confirm against stream_chat.
    """
    try:
        response = client.chat.completions.create(
            model=MODEL, messages=formatted, stream=True
        )
        for part in response:
            choices = part.choices
            # Some providers send keep-alive / metadata chunks with no
            # choices, and role-only deltas with empty content — skip both.
            if not choices:
                continue
            text = choices[0].delta.content
            if text:
                yield f"data: {json.dumps({'token': text})}\n\n"
    except Exception as e:
        # Boundary handler: surface the failure to the client as an SSE
        # event instead of dropping the connection mid-stream.
        yield f"data: {json.dumps({'error': str(e)})}\n\n"
    yield "data: [DONE]\n\n"
|
| 57 |
|
| 58 |
return StreamingResponse(event_stream(), media_type="text/event-stream")
|
index.html
CHANGED
|
@@ -684,13 +684,20 @@
|
|
| 684 |
|
| 685 |
for (const line of lines) {
|
| 686 |
if (line.startsWith('data: ')) {
|
| 687 |
-
const data = line.slice(6);
|
| 688 |
if (data === '[DONE]') break;
|
| 689 |
try {
|
| 690 |
const parsed = JSON.parse(data);
|
| 691 |
-
|
| 692 |
-
|
| 693 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 694 |
} catch (e) {}
|
| 695 |
}
|
| 696 |
}
|
|
|
|
| 684 |
|
| 685 |
for (const line of lines) {
|
| 686 |
if (line.startsWith('data: ')) {
|
| 687 |
+
const data = line.slice(6).trim();
|
| 688 |
if (data === '[DONE]') break;
|
| 689 |
try {
|
| 690 |
const parsed = JSON.parse(data);
|
| 691 |
+
if (parsed.error) {
|
| 692 |
+
accumulated = `<span style="color: #ef4444;">Error: ${parsed.error}</span>`;
|
| 693 |
+
aiMsgEl.innerHTML = accumulated;
|
| 694 |
+
break;
|
| 695 |
+
}
|
| 696 |
+
if (parsed.token) {
|
| 697 |
+
accumulated += parsed.token;
|
| 698 |
+
aiMsgEl.innerHTML = marked.parse(accumulated);
|
| 699 |
+
chatContainer.scrollTop = chatContainer.scrollHeight;
|
| 700 |
+
}
|
| 701 |
} catch (e) {}
|
| 702 |
}
|
| 703 |
}
|