# Streamlit chat UI backed by the Hugging Face Inference API via the
# OpenAI-compatible client.
from openai import OpenAI
import streamlit as st
import os
import sys
# NOTE(review): `dotenv_values` and `sys` appear unused in this file — verify
# before removing (this may be a partial view of the file).
from dotenv import load_dotenv, dotenv_values
# Populate os.environ from a local .env file (e.g. HUGGINGFACEHUB_API_TOKEN).
load_dotenv()
|
|
|
|
| |
# Hugging Face's router speaks the OpenAI wire protocol, so the stock OpenAI
# client works once pointed at the HF inference base URL.
client = OpenAI(
    api_key=os.getenv("HUGGINGFACEHUB_API_TOKEN"),  # loaded from .env; None if unset
    base_url="https://api-inference.huggingface.co/v1",
)
|
|
|
|
# Page header. NOTE(review): "π¬" / "π" look like mojibake of emoji
# (possibly 💬 and 🚀) — these are runtime strings, so left byte-identical;
# confirm the intended characters against the original file's encoding.
st.title("π¬ Ask")
st.caption("π A streamlit chatbot powered by Nedum")
|
|
| |
# First run of this session: start with an empty conversation history.
# Streamlit reruns the whole script per interaction, so guard the init.
if "messages" not in st.session_state:
    st.session_state.messages = []
|
|
| |
# Re-render the full conversation on every script rerun, since Streamlit
# redraws the page from scratch each time.
for entry in st.session_state.messages:
    st.chat_message(entry["role"]).write(entry["content"])
|
|
| |
# Handle one chat turn: echo the user prompt, query the model with the full
# history, and render/persist the assistant reply.
if prompt := st.chat_input():
    # Show the user's message immediately and record it so it is part of the
    # history sent to the model below.
    st.chat_message("user").write(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # The remote call can fail (bad/missing token, network error, rate limit);
    # surface a readable error and halt this rerun instead of crashing with a
    # raw traceback.
    try:
        response = client.chat.completions.create(
            model="google/gemma-2b-it",
            messages=st.session_state.messages,
        )
    except Exception as exc:  # boundary: report to the user and stop this run
        st.error(f"Model request failed: {exc}")
        st.stop()

    msg = response.choices[0].message.content

    # Render the reply and persist it so the next turn includes it as context.
    st.chat_message("assistant").write(msg)
    st.session_state.messages.append({"role": "assistant", "content": msg})