|
|
import os

import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables (e.g. API_KEY) from a local .env file
load_dotenv()
|
|
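# OpenAI-compatible client pointed at the Hugging Face Inference API;
# the token is read from the API_KEY environment variable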
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('API_KEY')
)
|
|
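# Display names shown in the UI, mapped to the Hugging Face model repos that serve them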
model_links = {
    "GPT-4o": "meta-llama/Meta-Llama-3-8B-Instruct",
    "GPT-4": "meta-llama/Meta-Llama-3.1-70B-Instruct",
}
|
|
def reset_conversation():
    '''
    Resets the conversation and message history.
    '''
    st.session_state.conversation = []
    st.session_state.messages = []
|
|
|
|
|
|
models = list(model_links.keys())
|
|
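# Sidebar: model picker, chat reset, and sampling temperature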
selected_model = st.sidebar.selectbox("Select a GPT model", models)
|
|
st.sidebar.button('New Chat', on_click=reset_conversation)
|
|
temp_values = st.sidebar.slider('ChatGPT Temperature', 0.0, 1.0, 0.5)
st.sidebar.markdown("Temperature controls how random the generated text is: lower values give more focused, deterministic replies, while higher values give more varied, creative ones.")
st.sidebar.markdown("**For optimum results, we recommend selecting a temperature between 0.5 and 0.7.**")
|
|
|
|
st.sidebar.markdown("*The content created may not be accurate.*")
st.sidebar.markdown("\n Our website: [Chat-GPT-Free.com](https://chat-gpt-free.com/).")
|
|
|
|
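# Reset the chat whenever the user switches models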
| if "prev_option" not in st.session_state: |
| st.session_state.prev_option = selected_model |
|
|
if st.session_state.prev_option != selected_model:
    st.session_state.messages = []
    st.session_state.prev_option = selected_model
    reset_conversation()
|
|
|
|
|
|
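# Hugging Face repo id behind the selected display name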
repo_id = model_links[selected_model]
|
|
|
|
st.subheader(f'[Chat-GPT-Free.com](https://chat-gpt-free.com/) with AI model {selected_model}')
|
|
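# Remember the repo link for this model in session state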
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]
|
|
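# Initialise the message history on first load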
if "messages" not in st.session_state:
    st.session_state.messages = []
|
|
|
|
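# Replay the conversation so far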
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
|
|
|
|
|
|
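# Handle a new user prompt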
if prompt := st.chat_input(f"Hi. I'm {selected_model}. How can I help you today?"):

    with st.chat_message("user"):
        st.markdown(prompt)

    st.session_state.messages.append({"role": "user", "content": prompt})
|
|
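    # Stream the assistant's reply from the selected model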
    with st.chat_message("assistant"):
        try:
            stream = client.chat.completions.create(
                model=repo_id,
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                temperature=temp_values,
                stream=True,
                max_tokens=3000,
            )

            response = st.write_stream(stream)
|
|
        except Exception:
            response = ("The model is overloaded!\n"
                        "Please try again later :(")
            st.write(response)
|
|
    st.session_state.messages.append({"role": "assistant", "content": response})
|
|