import os

import gradio as gr
from groq import Groq
|
|
| |
# Groq API client, shared by all request handlers below.
# SECURITY: the key is read from the GROQ_API_KEY environment variable instead of
# being hard-coded in source. The original file embedded a live-looking
# "gsk_..." key — treat that key as compromised and rotate it.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
|
|
def chat_with_groq(message, history, model_choice, temperature, max_tokens):
    """Send the running conversation to the Groq chat-completions API.

    Args:
        message: Newest user message (str).
        history: Prior turns as an iterable of (user, assistant) pairs.
        model_choice: Model identifier to request.
        temperature: Sampling temperature forwarded to the API.
        max_tokens: Cap on the length of the generated reply.

    Returns:
        The assistant's reply text, or an ``"Error: ..."`` string if the
        API call fails for any reason.
    """
    try:
        # Flatten the (user, assistant) pairs into the API's role/content format.
        messages = [
            {"role": role, "content": text}
            for user_turn, assistant_turn in history
            for role, text in (("user", user_turn), ("assistant", assistant_turn))
        ]
        # The new message goes last, as the pending user turn.
        messages.append({"role": "user", "content": message})

        completion = client.chat.completions.create(
            messages=messages,
            model=model_choice,
            temperature=temperature,
            max_tokens=max_tokens,
            top_p=1,
            stream=False
        )

        return completion.choices[0].message.content

    except Exception as e:
        # Surface failures to the chat UI as text instead of crashing the app.
        return f"Error: {str(e)}"
|
|
| |
# Model identifiers offered in the UI dropdown, passed verbatim to the Groq API.
# NOTE(review): Groq periodically decommissions models — verify these IDs are
# still live (e.g. llama-3.1-70b-versatile / mixtral-8x7b-32768 may be retired).
available_models = [
    "llama-3.1-8b-instant",
    "llama-3.1-70b-versatile",
    "mixtral-8x7b-32768",
    "gemma2-9b-it"
]
|
|
def predict(message, chat_history, model_choice, temperature, max_tokens):
    """Append one (user, bot) exchange to *chat_history* and return it.

    Blank or whitespace-only messages leave the history untouched.
    (ChatInterface-style callback; the Blocks UI below wires its own
    two-step handlers instead.)
    """
    # Guard clause: ignore empty submissions.
    if not message.strip():
        return chat_history

    reply = chat_with_groq(message, chat_history, model_choice, temperature, max_tokens)
    chat_history.append((message, reply))
    return chat_history
|
|
| |
# CSS injected into the Blocks app: enforce a minimum chatbot height
# (targets elem_id="chatbot" below) and center the layout in a 1200px column.
custom_css = """
#chatbot {
    min-height: 500px;
}
.container {
    max-width: 1200px;
    margin: auto;
}
"""
|
|
# --- Gradio UI definition ---------------------------------------------------
# Builds the app into the module-level `demo` object, launched at the bottom
# of the file.
with gr.Blocks(
    theme=gr.themes.Soft(),
    title="Groq AI Chatbot",
    css=custom_css
) as demo:

    gr.Markdown(
        """
        # π€ Groq AI Chatbot
        Fast AI-powered chatbot powered by Groq API
        
        **Note**: This chatbot uses Groq's inference API for fast responses.
        """
    )

    # Conversation display; elem_id ties into the custom CSS min-height rule.
    chatbot = gr.Chatbot(
        label="Conversation",
        height=500,
        show_copy_button=True,
        elem_id="chatbot"
    )

    with gr.Row():
        msg = gr.Textbox(
            label="Your Message",
            placeholder="Type your message here and press Enter...",
            lines=2,
            scale=4,
            container=False
        )

    with gr.Row():
        send_btn = gr.Button("Send π", variant="primary", size="lg")
        clear_btn = gr.Button("Clear Chat ποΈ", variant="secondary")

    # Generation settings, collapsed by default.
    with gr.Accordion("βοΈ Model Settings", open=False):
        with gr.Row():
            model_choice = gr.Dropdown(
                choices=available_models,
                value="llama-3.1-8b-instant",
                label="Select Model",
                info="Choose which AI model to use"
            )
        with gr.Row():
            temperature = gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.7,
                step=0.1,
                label="Temperature",
                info="Controls creativity: Lower = more deterministic, Higher = more creative"
            )
            max_tokens = gr.Slider(
                minimum=100,
                maximum=4096,
                value=1024,
                step=100,
                label="Max Tokens",
                info="Maximum length of the response"
            )

    def user_message(user_msg, history):
        """Step 1 of a turn: clear the textbox and append the user's message
        with a placeholder (None) for the pending bot reply."""
        return "", history + [[user_msg, None]]

    def bot_message(history, model, temp, tokens):
        """Step 2 of a turn: fill in the bot reply for the last history entry.

        The last entry was just added by user_message, so history[-1][0] is the
        new user message and history[:-1] is the completed transcript.
        """
        user_msg = history[-1][0]
        bot_msg = chat_with_groq(user_msg, history[:-1], model, temp, tokens)
        history[-1][1] = bot_msg
        return history

    # Submitting via Enter and via the Send button run the same two-step chain:
    # show the user message immediately, then stream in the bot reply.
    msg.submit(
        user_message,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot]
    ).then(
        bot_message,
        inputs=[chatbot, model_choice, temperature, max_tokens],
        outputs=[chatbot]
    )

    send_btn.click(
        user_message,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot]
    ).then(
        bot_message,
        inputs=[chatbot, model_choice, temperature, max_tokens],
        outputs=[chatbot]
    )

    # BUG FIX: the original chained two redundant handlers (set to None, then
    # reset to []) — one un-queued reset to an empty history is sufficient.
    clear_btn.click(
        lambda: [],
        None,
        [chatbot],
        queue=False
    )

    gr.Markdown(
        """
        ### π‘ About the Models:
        - **Llama 3.1 8B Instant**: Fast and efficient for general conversations
        - **Llama 3.1 70B Versatile**: More capable for complex tasks
        - **Mixtral 8x7B**: Excellent for coding and reasoning tasks
        - **Gemma2 9B**: Balanced performance across various tasks
        
        ### β οΈ Important:
        - Your conversations are processed through Groq's API
        - The API key is embedded in this application
        - For personal use only
        """
    )
|
|
# Script entry point.
# NOTE(review): share=True publishes the app via a public *.gradio.live URL and
# debug=True keeps the process attached with verbose logging — confirm both are
# intended before deploying anywhere non-local.
if __name__ == "__main__":
    demo.launch(share=True, debug=True)