| """ TypeGPT |
| @author: NiansuhAI |
| @email: niansuhtech@gmail.com |
| |
| """ |
| import numpy as np |
| import streamlit as st |
| from openai import OpenAI |
| import os |
| import sys |
| from dotenv import load_dotenv, dotenv_values |
| load_dotenv() |
|
|
|
|
|
|
|
|
|
|
| |
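# OpenAI-compatible client pointed at the Hugging Face Inference API.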
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('API_KEY'),
)

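# Maps each display name in the sidebar to its Hugging Face repo id.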
model_links = {
    "Mistral-Nemo-Instruct-2407": "mistralai/Mistral-Nemo-Instruct-2407",
    "Meta-Llama-3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
    "Meta-Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "Meta-Llama-3.1-405B-Instruct-FP8": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
    "Meta-Llama-3.1-405B-Instruct": "meta-llama/Meta-Llama-3.1-405B-Instruct",
    "Meta-Llama-3-70B-Instruct": "meta-llama/Meta-Llama-3-70B-Instruct",
    "Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
    "C4ai-command-r-plus": "CohereForAI/c4ai-command-r-plus",
    "Aya-23-35B": "CohereForAI/aya-23-35B",
    "Zephyr-orpo-141b-A35b-v0.1": "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
    "Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "Codestral-22B-v0.1": "mistralai/Codestral-22B-v0.1",
    "Nous-Hermes-2-Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
    "Yi-1.5-34B-Chat": "01-ai/Yi-1.5-34B-Chat",
    "Gemma-2-27b-it": "google/gemma-2-27b-it",
    "Meta-Llama-2-70B-Chat-HF": "meta-llama/Llama-2-70b-chat-hf",
    "Meta-Llama-2-7B-Chat-HF": "meta-llama/Llama-2-7b-chat-hf",
    "Meta-Llama-2-13B-Chat-HF": "meta-llama/Llama-2-13b-chat-hf",
    "Mistral-7B-Instruct-v0.1": "mistralai/Mistral-7B-Instruct-v0.1",
    "Mistral-7B-Instruct-v0.2": "mistralai/Mistral-7B-Instruct-v0.2",
    "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
    "Falcon-7b-Instruct": "tiiuae/falcon-7b-instruct",
    "Starchat2-15b-v0.1": "HuggingFaceH4/starchat2-15b-v0.1",
    "Gemma-1.1-7b-it": "google/gemma-1.1-7b-it",
    "Gemma-1.1-2b-it": "google/gemma-1.1-2b-it",
    "Zephyr-7B-Beta": "HuggingFaceH4/zephyr-7b-beta",
    "Zephyr-7B-Alpha": "HuggingFaceH4/zephyr-7b-alpha",
    "Phi-3-mini-128k-instruct": "microsoft/Phi-3-mini-128k-instruct",
    "Phi-3-mini-4k-instruct": "microsoft/Phi-3-mini-4k-instruct",
}

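# Image filenames on https://random.dog/, shown as a fallback when a request fails.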
random_dog = [
    "0f476473-2d8b-415e-b944-483768418a95.jpg",
    "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
    "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
    "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
    "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
    "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
    "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
    "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
    "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
    "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
    "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
    "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
    "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg",
]


def reset_conversation():
    '''Resets the chat history.'''
    st.session_state.conversation = []
    st.session_state.messages = []

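# Sidebar controls: model picker, temperature slider, and chat reset.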
models = list(model_links.keys())

selected_model = st.sidebar.selectbox("Select Model", models)

temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)

st.sidebar.button('Reset Chat', on_click=reset_conversation)

st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("\n[TypeGPT](https://typegpt.net)")

| if "prev_option" not in st.session_state: |
| st.session_state.prev_option = selected_model |
|
|
| if st.session_state.prev_option != selected_model: |
| st.session_state.messages = [] |
| |
| st.session_state.prev_option = selected_model |
| reset_conversation() |
|
|
|
|
|
|
| |
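# Hugging Face repo id for the currently selected model.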
repo_id = model_links[selected_model]

st.subheader(f'TypeGPT.net - {selected_model}')

if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]

if "messages" not in st.session_state:
    st.session_state.messages = []

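# Replay the stored chat history on every Streamlit rerun.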
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

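# Main chat loop: capture the user's prompt, echo it, then stream a reply.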
if prompt := st.chat_input(f"Hi, I'm {selected_model}. Ask me a question."):

    with st.chat_message("user"):
        st.markdown(prompt)

    st.session_state.messages.append({"role": "user", "content": prompt})

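    # Stream the completion; on failure, show a friendly error and a random dog.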
| with st.chat_message("assistant"): |
| try: |
| stream = client.chat.completions.create( |
| model=model_links[selected_model], |
| messages=[ |
| {"role": m["role"], "content": m["content"]} |
| for m in st.session_state.messages |
| ], |
| temperature=temp_values, |
| stream=True, |
| max_tokens=3000, |
| ) |
| |
| response = st.write_stream(stream) |
|
|
| except Exception as e: |
| |
| response = "😵💫 Looks like someone unplugged something!\ |
| \n Either the model space is being updated or something is down.\ |
| \n\ |
| \n Try again later. \ |
| \n\ |
| \n Here's a random pic of a 🐶:" |
| st.write(response) |
| random_dog_pick = 'https://random.dog/'+ random_dog[np.random.randint(len(random_dog))] |
| st.image(random_dog_pick) |
| st.write("This was the error message:") |
| st.write(e) |
|
|
|
|
|
|
|
|
| |
|
|
| st.session_state.messages.append({"role": "assistant", "content": response}) |