import streamlit as st
import json
import os

from langchain.agents import initialize_agent, Tool
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
def normalize(s):
    """Return *s* lowercased with ``_``, ``-``, ``.`` and whitespace runs
    collapsed to single spaces, for fuzzy comparison.

    Accepts any value (numbers, bools, ...) — it is stringified first.
    """
    # One C-level translate pass instead of three chained .replace() calls.
    separators = str.maketrans({"_": " ", "-": " ", ".": " "})
    return " ".join(str(s).lower().translate(separators).split())
def is_fuzzy_match(a, b, threshold=0.7):
    """Return True if normalized strings *a* and *b* fuzzily match.

    A match is either substring containment in either direction or a
    difflib similarity ratio >= *threshold*.
    """
    # Bug fix: "" is a substring of everything ("" in x is always True),
    # so an empty query used to match every value. Reject empties up front.
    if not a or not b:
        return False
    # Cheap containment test first; only fall back to SequenceMatcher,
    # which is comparatively expensive, when containment fails.
    if a in b or b in a:
        return True
    from difflib import SequenceMatcher
    return SequenceMatcher(None, a, b).ratio() >= threshold
def recursive_fuzzy_value_search(target_value, json_data=None):
    """Fuzzily search every scalar value in the loaded JSON documents.

    Args:
        target_value: what to look for (normalized before comparison).
        json_data: optional mapping of file name -> parsed JSON document;
            defaults to ``st.session_state.json_data`` so existing callers
            are unaffected.

    Returns:
        List of dicts with keys ``file``, ``key``, ``path`` and ``value``
        for every fuzzy match.
    """
    if json_data is None:
        json_data = st.session_state.json_data
    matches = []
    norm_target = normalize(target_value)
    _SCALARS = (str, int, float, bool)

    def _record(file_name, key, path_parts, value):
        # Shared append so dict and list branches stay consistent.
        matches.append({
            "file": file_name,
            "key": key,
            "path": ".".join(path_parts),
            "value": value,
        })

    def _search(obj, path, file_name):
        if isinstance(obj, dict):
            for k, v in obj.items():
                if isinstance(v, _SCALARS) and is_fuzzy_match(norm_target, normalize(v)):
                    _record(file_name, k, path + [k], v)
                _search(v, path + [k], file_name)
        elif isinstance(obj, list):
            for idx, item in enumerate(obj):
                label = f"[{idx}]"
                # Bug fix: scalar items directly inside lists (e.g.
                # {"tags": ["iphone"]}) were previously never compared —
                # the old code only recursed, and recursion ignores scalars.
                if isinstance(item, _SCALARS) and is_fuzzy_match(norm_target, normalize(item)):
                    _record(file_name, label, path + [label], item)
                _search(item, path + [label], file_name)

    for file_name, data in json_data.items():
        _search(data, [], file_name)
    return matches
# LangChain Tool for LLM
def json_search_tool(query: str) -> str:
    """Search all uploaded JSON files for any value (fuzzy match); returns matching fields and values."""
    hits = recursive_fuzzy_value_search(query)
    if not hits:
        return f"No match for '{query}'."
    # One formatted line per hit: file | key (dotted.path): value
    return "\n".join(
        f"{hit['file']} | {hit['key']} ({hit['path']}): {hit['value']}"
        for hit in hits
    )
# --- Streamlit UI ---
# set_page_config must be the first Streamlit command in the script, so it
# is moved ahead of the session_state initialisation.
st.set_page_config(page_title="Chat with Your JSONs!", layout="wide")

# Per-session stores: parsed JSON documents keyed by file name, and the
# running chat transcript as {"role": ..., "content": ...} dicts.
if "json_data" not in st.session_state:
    st.session_state.json_data = {}
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

st.title("Chat with Your JSON Files (powered by GPT + instant JSON search)")

uploaded_files = st.sidebar.file_uploader(
    "Choose one or more JSON files", type="json", accept_multiple_files=True
)
if uploaded_files:
    # Re-uploading replaces the whole working set.
    st.session_state.json_data.clear()
    for f in uploaded_files:
        # Bug fix: a single malformed file used to crash the whole app with
        # an uncaught JSONDecodeError; now it is reported and skipped.
        try:
            st.session_state.json_data[f.name] = json.load(f)
        except json.JSONDecodeError as err:
            st.sidebar.error(f"Could not parse {f.name}: {err}")
    st.sidebar.success("All JSON files loaded.")
import os

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    st.warning("You must set your OPENAI_API_KEY for chat.")
else:
    llm = ChatOpenAI(model_name="gpt-4.1", openai_api_key=OPENAI_API_KEY)
    tools = [
        Tool(
            name="json_search",
            func=json_search_tool,
            description="Find any value (name, product, number, etc) across all loaded JSON files. Input is what the user wants to find (e.g. 'iphone', 'apps installed', or 'alice')."
        )
    ]
    # Bug fix: the chat-conversational-react-description agent requires a
    # `chat_history` input. Without memory, agent.run(user_input) raises
    # "Missing some input keys: {'chat_history'}".
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    agent = initialize_agent(
        tools=tools,
        llm=llm,
        agent="chat-conversational-react-description",
        memory=memory,
        verbose=False
    )

    # Replay the transcript on every rerun (Streamlit re-executes the script).
    for msg in st.session_state.chat_history:
        if msg["role"] == "user":
            st.markdown(f"<div style='color: #4F8BF9;'><b>User:</b> {msg['content']}</div>", unsafe_allow_html=True)
        else:
            st.markdown(f"<div style='color: #1C6E4C;'><b>Agent:</b> {msg['content']}</div>", unsafe_allow_html=True)

    def send_chat():
        """on_change callback: record the user turn, run the agent, clear the box."""
        user_input = st.session_state.temp_input
        if user_input.strip():
            st.session_state.chat_history.append({"role": "user", "content": user_input})
            agent_reply = agent.run(user_input)
            st.session_state.chat_history.append({"role": "assistant", "content": agent_reply})
            # Resetting a widget's state is only permitted inside its own
            # callback, which is why the clear happens here.
            st.session_state.temp_input = ""

    if st.session_state.json_data:
        st.text_input("Your message:", key="temp_input", on_change=send_chat)
    else:
        st.info("Please upload at least one JSON file to start chatting.")
|