import os
import streamlit as st
import pandas as pd
import openai
import sqlite3
import json
import numpy as np
import datetime
import re
from langchain.chains import RetrievalQA
from langchain.schema import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import Field
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate

DB_PATH = "json_vector.db"
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
EMBEDDING_MODEL = "text-embedding-ada-002"

if "ingested_batches" not in st.session_state:
    st.session_state.ingested_batches = 0
if "messages" not in st.session_state:
    st.session_state.messages = []

st.set_page_config(page_title="Chat with Your JSON Vectors (Hybrid, Enhanced)", layout="wide")
st.title("Chat with Your Vectorized JSON Files (Hybrid Retrieval, SQLite, LLM)")

uploaded_files = st.file_uploader(
    "Upload JSON files in batches (any structure)", type="json", accept_multiple_files=True
)

# --- Enhanced flattening (never loses parent fields)
def flatten_json_obj(obj, parent_key="", sep="."):
    items = {}
    if isinstance(obj, dict):
        for k, v in obj.items():
            new_key = f"{parent_key}{sep}{k}" if parent_key else k
            # If this is a customer/email field, extract name!
            if (
                k.lower() in {"customer", "user", "email", "username"} and
                isinstance(v, str) and "@" in v
            ):
                local = v.split("@")[0]
                local_clean = re.sub(r'[^a-zA-Z0-9]', ' ', local)
                parts = [part for part in local_clean.split() if part]
                if parts:
                    items[new_key + "_name"] = parts[0].lower()
                    items[new_key + "_all_names"] = " ".join(parts).lower()
            items.update(flatten_json_obj(v, new_key, sep=sep))
    elif isinstance(obj, list):
        for i, v in enumerate(obj):
            new_key = f"{parent_key}{sep}{i}" if parent_key else str(i)
            items.update(flatten_json_obj(v, new_key, sep=sep))
    else:
        items[parent_key] = obj
    return items

# --- DEBUG: Show flattening of uploaded JSONs
if uploaded_files:
    st.markdown("#### DEBUG: Flat view of all uploaded JSON records")
    for file in uploaded_files:
        file.seek(0)
        try:
            raw = json.load(file)
            # NEW: Don't try to pull lists out of dicts; treat the whole dict as a record
            records = raw if isinstance(raw, list) else [raw]
            for rec in records:
                st.code(json.dumps(flatten_json_obj(rec), indent=2, default=str))
        except Exception as e:
            st.warning(f"Could not parse {file.name}: {e}")

def get_embedding(text):
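    """Return the OpenAI embedding vector for a single text string (one API call per text)."""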
    client = openai.OpenAI(api_key=OPENAI_API_KEY)
    response = client.embeddings.create(input=[text], model=EMBEDDING_MODEL)
    return response.data[0].embedding

def ensure_table():
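    """Create the json_records table on first use: batch metadata, raw/flat text, and the embedding as a float32 blob."""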
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    cursor.execute("""
    CREATE TABLE IF NOT EXISTS json_records (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        batch_time TEXT,
        source_file TEXT,
        raw_json TEXT,
        flat_text TEXT,
        embedding BLOB
    )
    """)
    conn.commit()
    conn.close()

def ingest_json_files(files):
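    """Flatten each uploaded JSON record, embed its flat text, and store everything in SQLite."""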
    ensure_table()
    rows = []
    batch_time = datetime.datetime.now(datetime.timezone.utc).isoformat()
    for file in files:
        file.seek(0)
        try:
            raw = json.load(file)
        except json.JSONDecodeError as e:
            st.warning(f"Skipping {file.name}: invalid JSON ({e})")
            continue
        source_name = file.name
        # NEW: Always treat the whole dict as a record, even if it contains lists
        records = raw if isinstance(raw, list) else [raw]
        for rec in records:
            flat = flatten_json_obj(rec)
            flat_text = "; ".join([f"{k}: {v}" for k, v in flat.items()])
            rows.append((batch_time, source_name, json.dumps(rec), flat_text))
    if not rows:
        st.warning("No records found in uploaded files!")
        return
    df = pd.DataFrame(rows, columns=["batch_time", "source_file", "raw_json", "flat_text"])
    st.write(f"Flattened {len(df)} records. Generating embeddings (this may take time, please wait)...")
    df["embedding"] = df["flat_text"].apply(get_embedding)
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    for _, row in df.iterrows():
        emb_bytes = np.array(row.embedding, dtype=np.float32).tobytes()
        cursor.execute("""
            INSERT INTO json_records (batch_time, source_file, raw_json, flat_text, embedding)
            VALUES (?, ?, ?, ?, ?)
        """, (row.batch_time, row.source_file, row.raw_json, row.flat_text, emb_bytes))
    conn.commit()
    conn.close()
    st.success(f"Ingested and indexed {len(df)} new records!")
    st.session_state.ingested_batches += 1

if uploaded_files and st.button("Ingest batch to database"):
    ingest_json_files(uploaded_files)

def query_vector_db(user_query, top_k=5):
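    """Semantic search: embed the query, then cosine-score it against every stored embedding (full-table scan) and return the top_k rows as Documents."""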
    query_emb = get_embedding(user_query)
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    cursor.execute("SELECT id, batch_time, source_file, raw_json, flat_text, embedding FROM json_records")
    results = []
    for row in cursor.fetchall():
        db_emb = np.frombuffer(row[5], dtype=np.float32)
        if len(db_emb) != len(query_emb): continue
        sim = np.dot(query_emb, db_emb) / (np.linalg.norm(query_emb) * np.linalg.norm(db_emb))
        results.append((sim, row))
    conn.close()
    results = sorted(results, key=lambda x: x[0], reverse=True)[:top_k]
    docs = []
    for sim, row in results:
        meta = {
            "id": row[0],
            "batch_time": str(row[1]),
            "source_file": row[2],
            "similarity": f"{sim:.4f} (embedding)",
            "raw_json": row[3],
        }
        docs.append(Document(page_content=row[4], metadata=meta))
    return docs

def python_fuzzy_match(user_query, top_k=5):
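    """Keyword fallback: score each record by how many distinct query terms appear in its flat text."""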
    query_terms = set(user_query.lower().replace("@", " ").replace(".", " ").split())
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    cursor.execute("SELECT id, batch_time, source_file, raw_json, flat_text FROM json_records")
    results = []
    for row in cursor.fetchall():
        flat_text = row[4].lower()
        score = sum(term in flat_text for term in query_terms)
        if score > 0:
            results.append((score, row))
    conn.close()
    results = sorted(results, key=lambda x: x[0], reverse=True)[:top_k]
    docs = []
    for score, row in results:
        meta = {
            "id": row[0],
            "batch_time": str(row[1]),
            "source_file": row[2],
            "similarity": f"{score} (fuzzy)",
            "raw_json": row[3],
        }
        docs.append(Document(page_content=row[4], metadata=meta))
    return docs

def extract_main_entity(question):
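    """Pull the most likely subject of the question: quoted text first, then an email's local part, then the longest non-stopword token."""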
    quoted = re.findall(r"['\"]([^'\"]+)['\"]", question)
    if quoted:
        return quoted[0].lower()
    email = re.findall(r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b", question)
    if email:
        return email[0].lower().split('@')[0]
    tokens = re.findall(r"\b([A-Za-z0-9]+)\b", question)
    stopwords = {"how", "much", "did", "spend", "was", "the", "is", "in", "on", "for", "a", "an", "of", "to", "with"}
    keywords = [t.lower() for t in tokens if t.lower() not in stopwords]
    if not keywords:
        return ""
    return max(keywords, key=len)

def filter_records_by_entity(records, entity):
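    """Keep only docs whose text mentions the entity; fall back to all docs if nothing matches."""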
    if not entity:
        return records
    matches = []
    for doc in records:
        if entity in doc.page_content.lower():
            matches.append(doc)
    return matches if matches else records

def hybrid_query(user_query, top_k=5):
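    """Merge vector and fuzzy hits (deduplicated by id), then return the single record that best matches the question's main entity, with the entity bolded."""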
    vector_docs = query_vector_db(user_query, top_k=top_k)
    fuzzy_docs = python_fuzzy_match(user_query, top_k=top_k)
    all_docs = []
    seen_ids = set()
    for doc in (vector_docs + fuzzy_docs):
        doc_id = doc.metadata.get("id")
        if doc_id not in seen_ids:
            all_docs.append(doc)
            seen_ids.add(doc_id)
    entity = extract_main_entity(user_query)
    st.markdown(f"#### DEBUG: Extracted entity from question: {entity}")
    st.markdown("#### DEBUG: All retrieved docs for your query")
    for idx, doc in enumerate(all_docs):
        st.code(doc.page_content)
    entity_docs = filter_records_by_entity(all_docs, entity) if entity else all_docs
    if entity_docs:
        doc = entity_docs[0]
        if entity:
            doc.page_content = re.sub(rf"({re.escape(entity)})", r"**\1**", doc.page_content, flags=re.IGNORECASE)
        st.markdown("#### Context shown to LLM")
        st.code(doc.page_content)
        return [doc]
    else:
        return all_docs[:1]

class HybridRetriever(BaseRetriever):
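    """LangChain retriever wrapper so RetrievalQA can call hybrid_query."""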
    top_k: int = Field(default=5)
    def _get_relevant_documents(self, query, run_manager=None, **kwargs):
        return hybrid_query(query, self.top_k)

system_prompt = (
    "You are a JSON data assistant. "
    "If the question mentions a name or email (e.g. Johnny), match it to any field value (even as part of an email) "
    "and answer directly using the record's fields. "
    "For example, if 'customer: johnny.appleseed@gmail.com' and the question is about Johnny, you should use that record."
    "If you can't find the answer, reply: 'I don’t have that information.'"
    "Never make up data. Never ask for clarification."
)
prompt = ChatPromptTemplate.from_messages([
    ("system", system_prompt),
    ("human", "Here are the most relevant records:\n{context}\n\nQuestion: {question}")
])

llm = ChatOpenAI(model="gpt-4.1", openai_api_key=OPENAI_API_KEY, temperature=0)
retriever = HybridRetriever(top_k=5)
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=retriever,
    chain_type_kwargs={"prompt": prompt},
    return_source_documents=True,
)

st.markdown("### Ask any question about your data, just like ChatGPT.")
for msg in st.session_state.messages:
    if msg["role"] == "user":
        st.markdown(f"<div style='color: #4F8BF9;'><b>User:</b> {msg['content']}</div>", unsafe_allow_html=True)
    elif msg["role"] == "assistant":
        st.markdown(f"<div style='color: #1C6E4C;'><b>Agent:</b> {msg['content']}</div>", unsafe_allow_html=True)
    elif msg["role"] == "function":
        st.markdown(f"<details><summary><b>Function Output:</b></summary><pre>{msg['content']}</pre></details>", unsafe_allow_html=True)

def send_message():
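    """Chat callback: run the QA chain on the user's message, append the answer and its source records to the history, then clear the input box."""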
    user_input = st.session_state.temp_input.strip()
    if not user_input:
        return
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.spinner("Thinking..."):
        result = qa_chain({"query": user_input})
        answer = result['result']
        st.session_state.messages.append({"role": "assistant", "content": answer})
        docs = result['source_documents']
        doc_list = []
        for doc in docs:
            doc_list.append({
                "file": doc.metadata["source_file"],
                "id": doc.metadata["id"],
                "similarity": doc.metadata["similarity"],
                "record": json.loads(doc.metadata["raw_json"])
            })
        st.session_state.messages.append({"role": "function", "content": json.dumps(doc_list, indent=2)})
    st.session_state.temp_input = ""

st.text_input("Your message:", key="temp_input", on_change=send_message)

if st.button("Clear chat"):
    st.session_state.messages = []

st.info(f"Batches ingested so far (this session): {st.session_state.ingested_batches}")