# mpq_ai/app.py — Streamlit chat assistant for Military Plaques.
# (Hugging Face Space; commit c2e5e6d "Update app.py" by notjulietxd.)
import streamlit as st
import requests
import json
import os
# ---- Page, session, and API configuration --------------------------------
st.set_page_config(page_title="Military Plaques Chat Assistant", page_icon="📛")

# Seed the chat history with a greeting so the UI is never empty on first load.
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "Hi! I'm the Military Plaques AI assistant. How can I help you today?"}]

# Flowise API configuration — both values come from the environment.
FLOWISE_URL = os.getenv("FLOWISE_URL")
# BUG FIX: the key was previously assigned FLOWISE_URL, so every request was
# sent with the endpoint URL as its Bearer token. Read the key's own variable.
FLOWISE_API_KEY = os.getenv("FLOWISE_API_KEY")
def get_flowise_response(prompt):
    """POST *prompt* plus the running chat history to the Flowise endpoint.

    Returns the decoded JSON payload on success, or a dict with a single
    "error" key describing the failure. Never raises.
    """
    # Fail fast with a readable message instead of a requests MissingSchema
    # error when the endpoint has not been configured.
    if not FLOWISE_URL:
        return {"error": "FLOWISE_URL environment variable is not set"}
    headers = {
        "Authorization": f"Bearer {FLOWISE_API_KEY}",
        "Content-Type": "application/json",
    }
    # Send only role/content pairs: stored messages may carry extra keys
    # ("sources", "follow_up_prompts") the API does not expect.
    payload = {
        "question": prompt,
        "history": [{"role": m["role"], "content": m["content"]} for m in st.session_state.messages],
    }
    try:
        # BUG FIX: a timeout prevents the Streamlit script from hanging
        # indefinitely if the Flowise service is unreachable.
        response = requests.post(FLOWISE_URL, headers=headers, json=payload, timeout=60)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        return {"error": f"API request failed: {str(e)}"}
    except json.JSONDecodeError:
        return {"error": "Invalid JSON response from API"}
# ---- Render the conversation so far ---------------------------------------
# (Dead `else: pass` and the unused enumerate() index were removed.)
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
        # Assistant turns may carry source links extracted from Flowise's
        # "sourceDocuments"; render them as a numbered list.
        if message["role"] == "assistant":
            sources = message.get("sources", [])
            if sources:
                st.markdown("**Check out the following links:**")
                for idx, source in enumerate(sources, 1):
                    st.markdown(f"{idx}. {source}")
# Canned questions offered as one-click buttons before the user types.
starter_prompts = [
    "What types of plaques do you offer?",
    "Can I customize my plaque design?",
    "How long does it take to receive my order?",
    "Contact details",
]
# ---- Starter buttons (shown only before the first user message) -----------
# History holds exactly the greeting (length 1) until the user interacts.
if len(st.session_state.messages) == 1:
    cols = st.columns(2)
    for i, starter in enumerate(starter_prompts):
        with cols[i % 2]:  # alternate buttons between the two columns
            if st.button(starter):
                # Record the user's choice, then fetch the answer.
                st.session_state.messages.append({"role": "user", "content": starter})
                with st.spinner("Thinking..."):
                    flowise_response = get_flowise_response(starter)
                if "error" in flowise_response:
                    response_content = f"Error: {flowise_response['error']}"
                    sources = []
                    follow_up_prompts = []
                else:
                    response_content = flowise_response.get("text", "Sorry, I didn't get that.")
                    # BUG FIX: de-duplicate source links with dict.fromkeys so
                    # display order is deterministic (set() randomized it).
                    source_docs = flowise_response.get("sourceDocuments", [])
                    deduped = dict.fromkeys(
                        doc.get("metadata", {}).get("source", "") for doc in source_docs
                    )
                    sources = [s for s in deduped if s]  # drop empty strings
                    # followUpPrompts may arrive as a JSON string (sometimes
                    # double-encoded) or already as a list; normalize to a list.
                    raw_follow_ups = flowise_response.get("followUpPrompts", "[]")
                    if isinstance(raw_follow_ups, str):
                        try:
                            parsed = json.loads(raw_follow_ups)
                            if isinstance(parsed, str):  # double-encoded JSON
                                parsed = json.loads(parsed)
                            follow_up_prompts = parsed if isinstance(parsed, list) else []
                        except json.JSONDecodeError:
                            follow_up_prompts = []
                    else:
                        follow_up_prompts = raw_follow_ups if isinstance(raw_follow_ups, list) else []
                # Persist the assistant turn, then rerun so the history loop
                # above renders it (and hides the starter buttons).
                st.session_state.messages.append({
                    "role": "assistant",
                    "content": response_content,
                    "sources": sources,
                    "follow_up_prompts": follow_up_prompts,
                })
                st.rerun()
# ---- Free-form chat input --------------------------------------------------
if prompt := st.chat_input("Type your message here..."):
    # Record the user's message, then fetch and render the assistant reply.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            flowise_response = get_flowise_response(prompt)
        if "error" in flowise_response:
            response_content = f"Error: {flowise_response['error']}"
            sources = []
            follow_up_prompts = []
        else:
            response_content = flowise_response.get("text", "Sorry, I didn't get that.")
            # BUG FIX: de-duplicate source links with dict.fromkeys so display
            # order is deterministic (set() randomized it).
            source_docs = flowise_response.get("sourceDocuments", [])
            deduped = dict.fromkeys(
                doc.get("metadata", {}).get("source", "") for doc in source_docs
            )
            sources = [s for s in deduped if s]  # drop empty strings
            # followUpPrompts may arrive as a JSON string (sometimes
            # double-encoded) or already as a list; normalize to a list.
            raw_follow_ups = flowise_response.get("followUpPrompts", "[]")
            if isinstance(raw_follow_ups, str):
                try:
                    parsed = json.loads(raw_follow_ups)
                    if isinstance(parsed, str):  # double-encoded JSON
                        parsed = json.loads(parsed)
                    follow_up_prompts = parsed if isinstance(parsed, list) else []
                except json.JSONDecodeError:
                    follow_up_prompts = []
            else:
                follow_up_prompts = raw_follow_ups if isinstance(raw_follow_ups, list) else []
        # Render the reply immediately; the history loop re-renders it on
        # subsequent reruns.
        st.markdown(response_content)
        if sources:
            st.markdown("**Check out the following links:**")
            for idx, source in enumerate(sources, 1):
                st.markdown(f"{idx}. {source}")
    # Persist the assistant turn so it survives the next rerun.
    st.session_state.messages.append({
        "role": "assistant",
        "content": response_content,
        "sources": sources,
        "follow_up_prompts": follow_up_prompts,
    })