| import gradio as gr |
| import os |
| from huggingface_hub import InferenceClient |
|
|
| |
# Read the Hugging Face API token from the environment. Failing fast with a
# clear message beats a confusing 401 from the inference API later on.
api_key = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not api_key:
    raise RuntimeError(
        "HUGGINGFACEHUB_API_TOKEN is not set; export it before running this app."
    )

# Shared inference client bound to the Mistral-7B instruct model.
client = InferenceClient(
    model="mistralai/Mistral-7B-Instruct-v0.2",
    token=api_key,
)
|
|
| |
def generate_response(prompt: str) -> str:
    """Generate a completion for *prompt* via the shared InferenceClient.

    Args:
        prompt: Free-form user question or instruction.

    Returns:
        The model's generated text, or a short notice when the prompt is
        empty or whitespace-only.
    """
    # Guard against empty input so we don't waste a remote API call.
    if not prompt or not prompt.strip():
        return "Please enter a prompt."
    # max_new_tokens caps the response length to keep latency reasonable.
    return client.text_generation(prompt, max_new_tokens=200)
|
|
| |
# Simple single-turn UI: one textbox in, one text answer out.
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(placeholder="Ask me anything..."),
    outputs="text",
    title="Mistral-7B Chatbot",
    description="Enter a question and get a response from Mistral-7B.",
)

# Launch only when executed as a script, so the module can be imported
# (e.g. by tests or a hosting wrapper) without starting the web server.
if __name__ == "__main__":
    iface.launch()
|
|