import json
import os
import time

import requests
import streamlit as st

from langchain.chains import LLMChain
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain_core.messages import SystemMessage
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
from langchain_groq import ChatGroq
|
|
|
|
|
|
|
# Page chrome: tab title/icon, centered layout, default menu; header image on every view.
st.set_page_config(page_title="LOG-CHAT", page_icon="BEAC.jpg", layout="centered", menu_items=None)
st.image("BEAC.jpg")

# Route on the ?page= query parameter; default to the chatbot view.
# st.experimental_get_query_params() is deprecated (removed in newer Streamlit);
# prefer st.query_params (Streamlit >= 1.30) and fall back for older releases.
try:
    page = st.query_params.get("page", "chatbot")
except AttributeError:  # older Streamlit without st.query_params
    page = st.experimental_get_query_params().get("page", ["chatbot"])[0]

st.markdown('<div class="content">', unsafe_allow_html=True)
|
|
|
|
|
if page == "chatbot":
    st.header("LOG-CHAT")

    def main():
        """Render the chat UI and answer one question per 'Send' click.

        Conversation history is persisted in ``st.session_state.chat_history``
        across Streamlit reruns and replayed into a fresh windowed memory on
        every run, so the LLM sees the last ``conversational_memory_length``
        exchanges.
        """
        # SECURITY: never hard-code API keys in source — the previously
        # committed key must be treated as leaked and revoked. Read the key
        # from the environment, falling back to Streamlit secrets.
        groq_api_key = os.environ.get("GROQ_API_KEY") or st.secrets.get("GROQ_API_KEY")
        if not groq_api_key:
            st.error("GROQ_API_KEY is not set (environment variable or Streamlit secret).")
            return

        st.markdown('<div id="chatbot"></div>', unsafe_allow_html=True)

        # User-tunable settings.
        system_prompt = st.text_input("System prompt:", "You are a helpful assistant.")
        model = st.selectbox('Choose a model', ['llama3-8b-8192', 'mixtral-8x7b-32768', 'gemma-7b-it'])
        conversational_memory_length = st.slider('Conversational memory length:', 1, 10, value=5)

        # Windowed memory is rebuilt on each rerun; stored history is replayed
        # into it below so context survives Streamlit's stateless reruns.
        memory = ConversationBufferWindowMemory(
            k=conversational_memory_length,
            memory_key="chat_history",
            return_messages=True,
        )

        user_question = st.text_input("Ask me a question:")
        send_question_to_ai = st.button("Send")

        if 'chat_history' not in st.session_state:
            st.session_state.chat_history = []
        else:
            for message in st.session_state.chat_history:
                memory.save_context({'input': message['human']}, {'output': message['AI']})

        groq_chat = ChatGroq(groq_api_key=groq_api_key, model_name=model)

        # Only query the model for a non-empty question; the original fired a
        # request even when 'Send' was clicked with a blank input box.
        if send_question_to_ai and user_question.strip():
            prompt = ChatPromptTemplate.from_messages(
                [
                    SystemMessage(content=system_prompt),
                    MessagesPlaceholder(variable_name="chat_history"),
                    HumanMessagePromptTemplate.from_template("{human_input}"),
                ]
            )

            conversation = LLMChain(
                llm=groq_chat,
                prompt=prompt,
                verbose=True,
                memory=memory,
            )

            response = conversation.predict(human_input=user_question)
            st.session_state.chat_history.append({'human': user_question, 'AI': response})
            st.write("chatbot:", response)

    if __name__ == "__main__":
        main()
|
|
|
|
|
|
|
|
|
# Static footer rendered on every page; HTML is passed through unescaped.
_footer_html = """
<footer class="footer">
<p>Contact us: <a href="mailto:yourname@example.com">yourname@example.com</a></p>
<p>© 2024 Your Company. All rights reserved.</p>
</footer>
"""
st.markdown(_footer_html, unsafe_allow_html=True)
|
|
|