Geoeasy committed on
Commit
72721bf
·
verified ·
1 Parent(s): b334b44

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +142 -0
app.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from openai import OpenAI, OpenAIError
3
+
4
# Global message history shared by every callback.
# NOTE(review): module-level state means all concurrent Gradio sessions share
# one conversation — acceptable for local, single-user use only.
history = []


# Main chatbot callback, wired to both the Send button and textbox submit.
# The client is created per call with the key the user typed, so no key
# ever has to be baked into the app.
def chatbot(user_input, api_key, temperature, top_p, max_tokens):
    """Send *user_input* to the model and return updated chat state.

    Parameters
    ----------
    user_input : str
        Text typed by the user. Empty or whitespace-only input is ignored.
    api_key : str
        NVIDIA API key for the OpenAI-compatible endpoint.
    temperature : float
        Sampling temperature forwarded to the API.
    top_p : float
        Nucleus-sampling cutoff forwarded to the API.
    max_tokens : int
        Maximum number of tokens the model may generate.

    Returns
    -------
    tuple[list[dict], str]
        User/assistant messages for the Chatbot widget (the system prompt is
        filtered out) and an empty string that clears the input textbox.
    """
    global history

    def _display():
        # Only user/assistant turns are rendered; the system prompt stays hidden.
        return [
            {"role": msg["role"], "content": msg["content"]}
            for msg in history
            if msg["role"] in ("user", "assistant")
        ]

    # Ignore empty/whitespace-only input. Bug fix: the old code returned the
    # raw history here, which leaked the system message into the widget.
    if not user_input or not user_input.strip():
        return _display(), ''

    # Record the user's turn.
    history.append({"role": "user", "content": user_input})

    # Ensure the system prompt sits at the start of the history exactly once
    # (robust even after earlier turns/errors, unlike the old len()==1 check).
    if history[0]["role"] != "system":
        history.insert(0, {
            "role": "system",
            "content": "You are a helpful assistant that explains complex topics clearly."
        })

    # Fail fast with a visible chat message when no key was provided, rather
    # than letting the client or the API raise.
    if not api_key:
        history.append({
            "role": "assistant",
            "content": "⚠️ API Error: missing API key — please enter your NVIDIA API key."
        })
        return _display(), ''

    # OpenAI-compatible client pointed at NVIDIA's hosted inference endpoint.
    client = OpenAI(
        base_url="https://integrate.api.nvidia.com/v1",
        api_key=api_key
    )

    try:
        # Stream the completion and concatenate the text deltas.
        response_stream = client.chat.completions.create(
            model="meta/llama3-8b-instruct",
            messages=history,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=True
        )
        parts = []
        for chunk in response_stream:
            delta = chunk.choices[0].delta
            if delta and delta.content:
                parts.append(delta.content)
        assistant_reply = "".join(parts)
    except OpenAIError as e:
        # Surface API failures (bad key, quota, network) inside the chat.
        assistant_reply = f"⚠️ API Error: {e.__class__.__name__}: {e}"

    # Record the assistant's turn and refresh the widget.
    history.append({"role": "assistant", "content": assistant_reply})
    return _display(), ''
56
# Reset handler for the Clear button.
def clear_history():
    """Drop every stored message and blank both UI components.

    Returns
    -------
    tuple[list, str]
        An empty message list for the Chatbot widget and an empty string
        for the input textbox.
    """
    global history
    # Rebind (not mutate) so the next turn starts from a fresh list.
    history = []
    return [], ""
61
+
62
# Page-level CSS: centers the header, flattens the chat panel's chrome, and
# hides Gradio's default footer. (No escapes needed, so no raw prefix.)
custom_css = """
#header {
text-align: center;
margin-bottom: 1rem;
}
#title {
font-size: 2rem;
margin: 0;
}
#chatbot {
border: none;
background-color: #f9f9f9;
}
footer {
visibility: hidden;
}
"""
80
+
81
# Build the Gradio UI. Layout depends on the exact context-manager nesting:
# a centered header row, then a main row split into the chat column (left)
# and the API-key/settings column (right).
with gr.Blocks(css=custom_css, theme=gr.themes.Base()) as demo:
    # Centered header
    with gr.Row(elem_id="header"):
        gr.Markdown("<h1 id='title'>🌐 GeoChat</h1>")

    # Main layout: chat + settings
    with gr.Row():
        with gr.Column(scale=4, min_width=600):
            # type="messages" renders a list of {"role", "content"} dicts,
            # matching what chatbot() returns.
            chatbot_ui = gr.Chatbot(elem_id="chatbot", label="Assistant", height=500, type="messages")
            with gr.Row():
                txt = gr.Textbox(
                    placeholder="Type your question and press Send...",
                    show_label=False,
                    lines=2
                )
                btn = gr.Button("Send")
            with gr.Row():
                clear_btn = gr.Button("Clear")

        with gr.Column(scale=1, min_width=200):
            gr.Markdown(
                """
### 🔑 API Key
Get your NVIDIA API Key at [NVIDIA NGC API Keys](https://org.ngc.nvidia.com/setup/api-keys)
"""
            )
            # Password-type textbox so the key is masked in the browser.
            api_key_input = gr.Textbox(
                label="NVIDIA API Key",
                placeholder="Enter your key here",
                type="password",
                show_label=True
            )
            gr.Markdown("### ⚙️ Settings")
            temp_slider = gr.Slider(0, 1, value=0.6, label="Temperature")
            top_p_slider = gr.Slider(0, 1, value=0.95, label="Top-p")
            max_tokens_slider = gr.Slider(64, 2048, value=1024, step=64, label="Max Tokens")
            gr.Markdown(
                """
**Temperature:** controls the randomness of the responses; lower values make output more deterministic.

**Top-p:** sets the cumulative probability for nucleus sampling; lower values focus on fewer tokens.

**Max Tokens:** maximum number of tokens the model can generate in the response.
"""
            )

    # Interaction events: the Send button and Enter/submit in the textbox both
    # call chatbot(); its second output ('') clears the textbox after a turn.
    btn.click(
        fn=chatbot,
        inputs=[txt, api_key_input, temp_slider, top_p_slider, max_tokens_slider],
        outputs=[chatbot_ui, txt]
    )
    txt.submit(
        fn=chatbot,
        inputs=[txt, api_key_input, temp_slider, top_p_slider, max_tokens_slider],
        outputs=[chatbot_ui, txt]
    )
    # Clear resets the module-level history and empties both widgets.
    clear_btn.click(fn=clear_history, outputs=[chatbot_ui, txt])
139
+
140
# Script entry point: start the Gradio server (blocking). Note: launch() as
# called here does not auto-open a browser; pass inbrowser=True for that.
if __name__ == "__main__":
    demo.launch()