Update app.py
app.py
CHANGED
@@ -3,70 +3,57 @@ import gradio as gr
from openai import OpenAI
import os
import time

# ==================== Configuration ====================
DEFAULT_SYSTEM_PROMPT = "You are DeepSeek-V4, an advanced AI assistant with strong reasoning capabilities. Provide accurate, helpful, and well-reasoned responses."

REASONING_EFFORT_MAP = {
    "Non-think": "minimal",
    "Think High": "high",
    "Think Max": "maximum"
}

THINKING_TYPE_MAP = {
    "Non-think": "disabled",
    "Think High": "enabled",
    "Think Max": "enabled"
}

# ==================== API Client ====================
def get_client():
    api_key = os.environ.get('DEEPSEEK_API_KEY')
    if not api_key:
        raise ValueError(
            "DEEPSEEK_API_KEY not found!\n\n"
            "Get your key: https://platform.deepseek.com/api_keys\n"
            "Then set: DEEPSEEK_API_KEY=your-key-here"
        )
    return OpenAI(api_key=api_key, base_url="https://api.deepseek.com")

# ==================== Response Generation ====================
def generate_response(
    message: str,
    history: list,
    thinking_mode: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    system_prompt: str,
    show_thinking: bool
):
    if not message.strip():
        yield history, "", "Please enter a message."
        return

    client = get_client()

    # Build messages from history (dictionaries)
    api_messages = [{"role": "system", "content": system_prompt}]

    for msg in history:
        api_messages.append({"role": msg["role"], "content": msg["content"]})

    api_messages.append({"role": "user", "content": message})

    reasoning_effort = REASONING_EFFORT_MAP.get(thinking_mode, "high")
    thinking_type = THINKING_TYPE_MAP.get(thinking_mode, "enabled")

@@ -74,10 +61,9 @@ def generate_response(
    try:
        start_time = time.time()

        stream = client.chat.completions.create(
            model="deepseek-v4-pro",
            messages=api_messages,
            stream=True,
            max_tokens=max_tokens,
            temperature=temperature,

@@ -90,53 +76,66 @@
        thinking_chunks = []

        for chunk in stream:
            delta = chunk.choices[0].delta

            if delta.content:
                content_chunks.append(delta.content)

            if hasattr(delta, 'reasoning_content') and delta.reasoning_content:
                thinking_chunks.append(delta.reasoning_content)

            current_content = ''.join(content_chunks)
            current_thinking = ''.join(thinking_chunks)

            # Build response text
            if show_thinking and current_thinking:
                response_text = f"**[Thinking]**\n{current_thinking}\n\n**[Response]**\n{current_content}"
            else:
                response_text = current_content

            elapsed = time.time() - start_time

            # Update history with current state
            new_history = history.copy()
            new_history.append({"role": "user", "content": message})
            new_history.append({"role": "assistant", "content": response_text})

            yield new_history, current_thinking, f"Streaming... ({elapsed:.1f}s)"

        # Final response
        final_content = ''.join(content_chunks)
        final_thinking = ''.join(thinking_chunks)

        if show_thinking and final_thinking:
            final_response = f"**[Thinking]**\n{final_thinking}\n\n**[Response]**\n{final_content}"
        else:
            final_response = final_content

        end_time = time.time()

        final_history = history.copy()
        final_history.append({"role": "user", "content": message})
        final_history.append({"role": "assistant", "content": final_response})

        yield final_history, final_thinking, f"Done in {end_time - start_time:.2f}s"

    except Exception as e:
        error_msg = f"Error: {str(e)}"
        new_history = history.copy()
        new_history.append({"role": "user", "content": message})
        new_history.append({"role": "assistant", "content": error_msg})
        yield new_history, "", error_msg

# ==================== Gradio Interface ====================
def create_demo():
    with gr.Blocks(title="DeepSeek-V4 Pro Demo") as demo:

        # Header
        gr.Markdown("""
        # DeepSeek-V4 Pro

        **Million-Token Context Intelligence**
        1.6T Parameters | 49B Activated | 1M Context Length
        """)

        with gr.Row():

@@ -147,12 +146,13 @@ def create_demo():
                thinking_mode = gr.Radio(
                    choices=["Non-think", "Think High", "Think Max"],
                    value="Think High",
                    label="Reasoning Mode",
                    info="Non-think: Fast | Think High: Analysis | Think Max: Deep reasoning"
                )

                show_thinking = gr.Checkbox(
                    value=True,
                    label="Show thinking process"
                )

                system_prompt = gr.Textbox(

@@ -161,42 +161,38 @@
                    lines=3
                )

                with gr.Accordion("Advanced Parameters", open=False):
                    max_tokens = gr.Slider(64, 32768, value=4096, step=64, label="Max Tokens")
                    temperature = gr.Slider(0.0, 2.0, value=0.7, step=0.05, label="Temperature")
                    top_p = gr.Slider(0.0, 1.0, value=1.0, step=0.05, label="Top P")

            # Right - Chat interface
            with gr.Column(scale=2):
                chatbot = gr.Chatbot(
                    label="Chat with DeepSeek-V4 Pro",
                    height=500,
                    type="messages"
                )

                with gr.Accordion("Thinking Process", open=True):
                    thinking_display = gr.Textbox(
                        label="Reasoning",
                        value="*Waiting for input...*",
                        lines=5,
                        interactive=False
                    )

                with gr.Row():
                    message_input = gr.Textbox(
                        label="Your Message",
                        placeholder="Type your message here...",
                        lines=2,
                        scale=9
                    )
                    send_btn = gr.Button("Send", variant="primary", scale=1)

                with gr.Row():
                    clear_btn = gr.Button("Clear Chat", size="sm")

                status_display = gr.Textbox(
                    label="Status",

@@ -207,53 +203,62 @@ def create_demo():
        # Footer
        gr.Markdown("""
        ---
        [Get API Key](https://platform.deepseek.com/api_keys) |
        [Model Card](https://huggingface.co/deepseek-ai/DeepSeek-V4-Pro) |
        [API Docs](https://platform.deepseek.com/docs)
        """)

        # ==================== Event Handlers ====================

        def on_send(
            message, history, thinking_mode, show_thinking,
            system_prompt, max_tokens, temperature, top_p
        ):
            if not message.strip():
                yield history, "*Waiting for input...*", "Please enter a message."
                return

            if not os.environ.get('DEEPSEEK_API_KEY'):
                error_msg = {
                    "role": "assistant",
                    "content": "**API Key Missing**\n\nPlease set `DEEPSEEK_API_KEY` environment variable.\nGet one: https://platform.deepseek.com/api_keys"
                }
                history = history or []
                history.append({"role": "user", "content": message})
                history.append(error_msg)
                yield history, "*API Key not configured*", "Error"
                return

            history = history or []

            for hist, thinking, status in generate_response(
                message, history, thinking_mode, max_tokens,
                temperature, top_p, system_prompt, show_thinking
            ):
                yield hist, thinking if thinking else "*No reasoning to display*", status

        send_btn.click(
            fn=on_send,
            inputs=[message_input, chatbot, thinking_mode, show_thinking,
                    system_prompt, max_tokens, temperature, top_p],
            outputs=[chatbot, thinking_display, status_display]
        ).then(
            fn=lambda: "",
            outputs=[message_input]
        )

        message_input.submit(
            fn=on_send,
            inputs=[message_input, chatbot, thinking_mode, show_thinking,
                    system_prompt, max_tokens, temperature, top_p],
            outputs=[chatbot, thinking_display, status_display]
        ).then(
            fn=lambda: "",
            outputs=[message_input]
        )

        clear_btn.click(
            fn=lambda: ([], "*Chat cleared*", "Cleared"),
            outputs=[chatbot, thinking_display, status_display]
        )

@@ -272,18 +277,12 @@ if __name__ == "__main__":
        print("\n" + "=" * 50)
        print("DEEPSEEK_API_KEY not found!")
        print("=" * 50)
        print("1. Get key: https://platform.deepseek.com/api_keys")
        print("2. Set: export DEEPSEEK_API_KEY='your-key'\n")

    demo = create_demo()
    demo.queue(max_size=50).launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False
    )
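
The core of this change is the streaming loop, which reads answer tokens from `delta.content` and reasoning tokens from a separate `delta.reasoning_content` field. The sketch below exercises that same pattern outside Gradio; it is illustrative only and assumes a valid `DEEPSEEK_API_KEY`, the `https://api.deepseek.com` base URL, and the `deepseek-v4-pro` model name used in app.py above.

# Minimal standalone sketch of the streaming pattern used in app.py above.
# Assumptions (not verified here): DEEPSEEK_API_KEY is set, the endpoint accepts
# the "deepseek-v4-pro" model name, and streamed deltas may carry a separate
# `reasoning_content` field, as the app expects.
import os
from openai import OpenAI

client = OpenAI(
    api_key=os.environ["DEEPSEEK_API_KEY"],
    base_url="https://api.deepseek.com",
)

stream = client.chat.completions.create(
    model="deepseek-v4-pro",
    messages=[{"role": "user", "content": "Explain quantum computing simply"}],
    stream=True,
    max_tokens=512,
)

thinking_parts, answer_parts = [], []
for chunk in stream:
    delta = chunk.choices[0].delta
    # Reasoning tokens (if the model emits them) arrive on a separate field
    # from the final answer text.
    if getattr(delta, "reasoning_content", None):
        thinking_parts.append(delta.reasoning_content)
    if delta.content:
        answer_parts.append(delta.content)

print("Reasoning:", "".join(thinking_parts))
print("Answer:", "".join(answer_parts))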