muthuk1 committed
Commit
dffc215
·
verified ·
1 Parent(s): 0a8419b

Add 4-tab Gradio Dashboard (Live Comparison, Batch Benchmark, Cost Analysis, Graph Explorer)

Files changed (1)
  1. graphrag/dashboard.py +488 -0
graphrag/dashboard.py ADDED
@@ -0,0 +1,488 @@
+ """
+ GraphRAG Comparison Dashboard — 4-Tab Gradio UI
+ ================================================
+ Tab 1: Live Query Comparison (side-by-side)
+ Tab 2: Batch Benchmark Results (HotpotQA)
+ Tab 3: Cost Analysis (projections + distributions)
+ Tab 4: Graph Explorer (interactive knowledge graph + reasoning paths)
+
+ Novelties: Adaptive routing, graph reasoning explanations, real-time cost tracking
+ """
+ import logging
+ import os
+
+ import gradio as gr
+ import pandas as pd
+ import plotly.graph_objects as go
+ from plotly.subplots import make_subplots
+
+ from graphrag.layers.graph_layer import GraphLayer
+ from graphrag.layers.llm_layer import LLMLayer
+ from graphrag.layers.orchestration_layer import InferenceOrchestrator, EmbeddingManager
+ from graphrag.layers.evaluation_layer import EvaluationLayer
+ from graphrag.benchmark import BenchmarkRunner
+
+ logger = logging.getLogger(__name__)
+
+ # ── Global State ─────────────────────────────────────────
+ orchestrator = None
+ evaluator = None
+ benchmark_runner = None
+ _initialized = False
+ _benchmark_results = []
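+ # Module-level singletons shared across Gradio callbacks; initialize_system()
+ # guards against double initialization via the _initialized flag.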
+
+
+ def initialize_system():
+     """Initialize all components."""
+     global orchestrator, evaluator, benchmark_runner, _initialized
+     if _initialized:
+         return "✅ System already initialized."
+
+     llm = LLMLayer(api_key=os.getenv("OPENAI_API_KEY", ""),
+                    model=os.getenv("LLM_MODEL", "gpt-4o-mini"))
+     llm.initialize()
+
+     embedder = EmbeddingManager(provider="openai", model="text-embedding-3-small",
+                                 api_key=os.getenv("OPENAI_API_KEY", ""))
+     embedder.initialize()
+
+     graph = GraphLayer()
+     tg_host = os.getenv("TG_HOST", "")
+     if tg_host:
+         graph.connect()
+
+     orchestrator = InferenceOrchestrator(graph_layer=graph, llm_layer=llm, embedder=embedder)
+     orchestrator.initialize()
+
+     evaluator = EvaluationLayer(eval_llm_model=os.getenv("LLM_MODEL", "gpt-4o-mini"),
+                                 api_key=os.getenv("OPENAI_API_KEY", ""))
+     evaluator.initialize()
+
+     benchmark_runner = BenchmarkRunner(orchestrator, evaluator)
+     _initialized = True
+     return f"✅ System initialized successfully! (LLM: {llm.model})"
+
+
+ # ── Tab 1: Live Query Comparison ─────────────────────────
+
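+ # Returns a 14-tuple matching the `outputs=` list wired in build_dashboard():
+ # status, both answers, routing info, token/latency/cost numbers, comparison
+ # chart, both context panels, and the entity/relation summary.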
+ def run_live_comparison(query, enable_adaptive, top_k, hops):
+     if not query.strip():
+         return ("Please enter a query.", "", "", "", 0, 0, 0, 0, 0, 0, None, "", "", "")
+     if not _initialized:
+         initialize_system()
+
+     try:
+         passages = _get_demo_passages(query)
+         if enable_adaptive:
+             comparison = orchestrator.run_adaptive(query, passages)
+         else:
+             comparison = orchestrator.run_comparison(query, passages, int(top_k), int(hops))
+
+         b, g = comparison.baseline, comparison.graphrag
+         fig = _build_comparison_chart(b, g)
+
+         baseline_ctx = "\n\n---\n\n".join([
+             f"**[{i+1}]:** {c[:300]}{'...' if len(c) > 300 else ''}"
+             for i, c in enumerate(b.contexts[:5])
+         ]) or "No contexts."
+
+         graphrag_ctx = "\n\n---\n\n".join([
+             f"**[{i+1}]:** {c[:300]}{'...' if len(c) > 300 else ''}"
+             for i, c in enumerate(g.contexts[:5])
+         ]) or "No contexts."
+
+         entities_display = ""
+         if g.entities_found:
+             entities_display = "**Entities Found:**\n" + "\n".join(
+                 [f"- 🔵 **{e.get('name', 'N/A')}** ({e.get('entity_type', 'N/A')})"
+                  for e in g.entities_found[:8]])
+         if g.relations_traversed:
+             entities_display += "\n\n**Relationships:**\n" + "\n".join(
+                 [f"- 🔗 {r}" for r in g.relations_traversed[:8]])
+
+         routing_info = ""
+         if enable_adaptive:
+             routing_info = (
+                 f"**🧠 Adaptive Routing:**\n"
+                 f"- Complexity: {g.complexity_score:.2f} | Type: {g.query_type}\n"
+                 f"- Recommended: **{comparison.recommended_pipeline.upper()}**\n"
+                 f"- {comparison.routing_reason}")
+
+         return ("✅ Done!", b.answer, g.answer, routing_info,
+                 b.total_tokens, g.total_tokens,
+                 round(b.latency_ms, 1), round(g.latency_ms, 1),
+                 round(b.cost_usd, 6), round(g.cost_usd, 6),
+                 fig, baseline_ctx, graphrag_ctx, entities_display)
+     except Exception as e:
+         return (f"❌ Error: {e}", "", "", "", 0, 0, 0, 0, 0, 0, None, "", "", "")
+
+
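+ # Demo corpus helper: streams the first HotpotQA validation row for context
+ # passages, falling back to canned text when the dataset is unreachable.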
+ def _get_demo_passages(query):
+     try:
+         from datasets import load_dataset
+         ds = load_dataset("hotpotqa/hotpot_qa", "distractor", split="validation", streaming=True)
+         for row in ds:
+             return [f"{t}: {' '.join(s)}"
+                     for t, s in zip(row["context"]["title"], row["context"]["sentences"])]
+     except Exception:
+         pass
+     return ["Demo passage. Connect TigerGraph for full functionality.",
+             "GraphRAG extracts entities and relationships for better retrieval.",
+             "The system supports both baseline RAG and GraphRAG pipelines."]
+
+
+ def _build_comparison_chart(baseline, graphrag):
+     fig = make_subplots(rows=1, cols=3, subplot_titles=("Tokens", "Latency (ms)", "Cost ($)"),
+                         horizontal_spacing=0.12)
+     colors = ["#3498db", "#e74c3c"]
+     methods = ["Baseline", "GraphRAG"]
+     fig.add_trace(go.Bar(x=methods, y=[baseline.total_tokens, graphrag.total_tokens],
+                          marker_color=colors, text=[baseline.total_tokens, graphrag.total_tokens],
+                          textposition='auto', showlegend=False), row=1, col=1)
+     fig.add_trace(go.Bar(x=methods, y=[baseline.latency_ms, graphrag.latency_ms],
+                          marker_color=colors, text=[f"{baseline.latency_ms:.0f}", f"{graphrag.latency_ms:.0f}"],
+                          textposition='auto', showlegend=False), row=1, col=2)
+     fig.add_trace(go.Bar(x=methods, y=[baseline.cost_usd, graphrag.cost_usd],
+                          marker_color=colors, text=[f"${baseline.cost_usd:.6f}", f"${graphrag.cost_usd:.6f}"],
+                          textposition='auto', showlegend=False), row=1, col=3)
+     fig.update_layout(height=350, margin=dict(t=40, b=20, l=20, r=20),
+                       paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)')
+     return fig
+
+
+ # ── Tab 2: Batch Benchmark ───────────────────────────────
+
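+ # Runs the baseline-vs-GraphRAG benchmark; per-sample results are cached in
+ # _benchmark_results so the Cost Analysis tab can reuse measured token counts.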
+ def run_batch_benchmark(num_samples, top_k, hops, progress=gr.Progress()):
+     global _benchmark_results
+     if not _initialized:
+         initialize_system()
+
+     def progress_cb(cur, tot, _):
+         progress(cur / tot, desc=f"Processing {cur}/{tot}...")
+
+     try:
+         results = benchmark_runner.run_hotpotqa_benchmark(
+             num_samples=int(num_samples), top_k=int(top_k), hops=int(hops),
+             progress_callback=progress_cb)
+         _benchmark_results = results.get("results", [])
+         agg = results.get("aggregate", {})
+         report = results.get("report", "")
+
+         if not _benchmark_results:
+             return "No results.", None, None, None, report
+
+         summary = pd.DataFrame({
+             "Metric": ["Avg F1", "Avg EM", "Avg Tokens", "Avg Cost ($)", "Avg Latency (ms)", "F1 Win Rate"],
+             "Baseline RAG": [
+                 f"{agg['baseline']['avg_f1']:.4f}", f"{agg['baseline']['avg_em']:.4f}",
+                 f"{agg['baseline']['avg_tokens']:.0f}", f"${agg['baseline']['avg_cost']:.6f}",
+                 f"{agg['baseline']['avg_latency_ms']:.0f}",
+                 f"{1 - agg.get('graphrag_f1_win_rate', 0.5):.1%}"],
+             "GraphRAG": [
+                 f"{agg['graphrag']['avg_f1']:.4f}", f"{agg['graphrag']['avg_em']:.4f}",
+                 f"{agg['graphrag']['avg_tokens']:.0f}", f"${agg['graphrag']['avg_cost']:.6f}",
+                 f"{agg['graphrag']['avg_latency_ms']:.0f}",
+                 f"{agg.get('graphrag_f1_win_rate', 0.5):.1%}"]
+         })
+
+         bar_fig = _build_benchmark_bar(agg)
+         radar_fig = _build_radar(agg)
+         return (f"✅ Done! {len(_benchmark_results)} samples.", summary, bar_fig, radar_fig, report)
+     except Exception as e:
+         return f"❌ Error: {e}", None, None, None, ""
+
+
+ def _build_benchmark_bar(agg):
+     metrics = ["F1", "EM", "Context Hit"]
+     bvals = [agg["baseline"]["avg_f1"], agg["baseline"]["avg_em"], agg["baseline"]["avg_context_hit"]]
+     gvals = [agg["graphrag"]["avg_f1"], agg["graphrag"]["avg_em"], agg["graphrag"]["avg_context_hit"]]
+     fig = go.Figure(data=[
+         go.Bar(name="Baseline", x=metrics, y=bvals, marker_color="#3498db",
+                text=[f"{v:.3f}" for v in bvals], textposition='auto'),
+         go.Bar(name="GraphRAG", x=metrics, y=gvals, marker_color="#e74c3c",
+                text=[f"{v:.3f}" for v in gvals], textposition='auto')])
+     fig.update_layout(barmode='group', title="Answer Quality", yaxis_title="Score", height=400,
+                       paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)')
+     return fig
+
+
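+ # Radar chart: quality metrics are plotted as-is; "Token Eff." and "Cost Eff."
+ # are baseline-to-GraphRAG ratios (baseline pinned at 1.0, capped at 2.0).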
+ def _build_radar(agg):
+     b, g = agg["baseline"], agg["graphrag"]
+     cats = ["F1", "EM", "Context Hit", "Token Eff.", "Cost Eff."]
+     te = min(b["avg_tokens"] / max(g["avg_tokens"], 1), 2.0)
+     ce = min(b["avg_cost"] / max(g["avg_cost"], 0.000001), 2.0)
+     bv = [b["avg_f1"], b["avg_em"], b["avg_context_hit"], 1.0, 1.0]
+     gv = [g["avg_f1"], g["avg_em"], g["avg_context_hit"], te, ce]
+     fig = go.Figure()
+     fig.add_trace(go.Scatterpolar(r=bv + [bv[0]], theta=cats + [cats[0]], fill='toself',
+                                   name='Baseline', line_color='#3498db', opacity=0.6))
+     fig.add_trace(go.Scatterpolar(r=gv + [gv[0]], theta=cats + [cats[0]], fill='toself',
+                                   name='GraphRAG', line_color='#e74c3c', opacity=0.6))
+     # Radial range matches the 2.0 efficiency cap so capped values are not clipped.
+     fig.update_layout(polar=dict(radialaxis=dict(visible=True, range=[0, 2.0])),
+                       title="Multi-Metric Radar", height=450, paper_bgcolor='rgba(0,0,0,0)')
+     return fig
+
+
+ # ── Tab 3: Cost Analysis ─────────────────────────────────
+
+ def compute_cost_analysis(num_queries, model):
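+     # Price table below is approximate USD per 1K tokens (input/output); vendor
+     # pricing changes over time, so treat these as editable assumptions.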
+     pricing = {
+         "gpt-4o-mini": {"input": 0.00015, "output": 0.0006},
+         "gpt-4o": {"input": 0.0025, "output": 0.01},
+         "gpt-3.5-turbo": {"input": 0.0005, "output": 0.0015},
+         "claude-3-5-sonnet": {"input": 0.003, "output": 0.015},
+         "claude-3-haiku": {"input": 0.00025, "output": 0.00125},
+     }
+     p = pricing.get(model, pricing["gpt-4o-mini"])
+     n = int(num_queries)
+
+     if _benchmark_results:
+         ab = sum(r["baseline_tokens"] for r in _benchmark_results) / len(_benchmark_results)
+         ag = sum(r["graphrag_tokens"] for r in _benchmark_results) / len(_benchmark_results)
+         acb = sum(r["baseline_cost"] for r in _benchmark_results) / len(_benchmark_results)
+         acg = sum(r["graphrag_cost"] for r in _benchmark_results) / len(_benchmark_results)
+     else:
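+         # No benchmark data yet: fall back to rough placeholder token counts and
+         # derive per-query cost from the selected model's price table.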
+         ab, ag = 950, 2400
+         acb = (800/1000*p["input"] + 150/1000*p["output"])
+         acg = (2200/1000*p["input"] + 200/1000*p["output"])
+
+     summary = pd.DataFrame({
+         "Metric": ["Avg Tokens", "Cost/Query", f"Total ({n:,}q)", "Monthly (1K qpd)", "Annual"],
+         "Baseline": [f"{ab:.0f}", f"${acb:.6f}", f"${acb*n:.4f}", f"${acb*1000*30:.2f}", f"${acb*1000*365:.2f}"],
+         "GraphRAG": [f"{ag:.0f}", f"${acg:.6f}", f"${acg*n:.4f}", f"${acg*1000*30:.2f}", f"${acg*1000*365:.2f}"],
+         # Token ratio for the token row; cost ratio for the four cost rows.
+         "Ratio": [f"{ag/max(ab, 1):.2f}x"] + [f"{acg/max(acb, 1e-9):.2f}x"]*4
+     })
+
+     qr = list(range(0, n+1, max(n//50, 1)))
+     fig_cum = go.Figure()
+     fig_cum.add_trace(go.Scatter(x=qr, y=[acb*q for q in qr], mode='lines', name='Baseline',
+                                  line=dict(color='#3498db', width=3)))
+     fig_cum.add_trace(go.Scatter(x=qr, y=[acg*q for q in qr], mode='lines', name='GraphRAG',
+                                  line=dict(color='#e74c3c', width=3)))
+     fig_cum.update_layout(title=f"Cumulative Cost ({model})", xaxis_title="Queries", yaxis_title="Cost ($)",
+                           height=400, paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)')
+
+     fig_tok = go.Figure()
+     if _benchmark_results:
+         fig_tok.add_trace(go.Histogram(x=[r["baseline_tokens"] for r in _benchmark_results],
+                                        name="Baseline", opacity=0.7, marker_color="#3498db"))
+         fig_tok.add_trace(go.Histogram(x=[r["graphrag_tokens"] for r in _benchmark_results],
+                                        name="GraphRAG", opacity=0.7, marker_color="#e74c3c"))
+         fig_tok.update_layout(barmode='overlay', title="Token Distribution", height=400,
+                               paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)')
+     else:
+         fig_tok.add_annotation(text="Run benchmark first for distribution", showarrow=False)
+
+     return summary, fig_cum, fig_tok
+
+
+ # ── Tab 4: Graph Explorer ────────────────────────────────
+
+ def explore_graph(query, depth):
+     if not _initialized:
+         initialize_system()
+     try:
+         import networkx as nx
+         passages = _get_demo_passages(query)
+         gr_result = orchestrator.run_graphrag(query, passages, hops=int(depth))
+
+         G = nx.Graph()
+         for e in gr_result.entities_found[:20]:
+             G.add_node(e.get("name", "?"), entity_type=e.get("entity_type", "CONCEPT"),
+                        description=e.get("description", ""))
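+         # Relation strings from the orchestrator are expected to look like
+         # "SRC -[TYPE]-> TGT: description"; anything else is skipped.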
+         for r in gr_result.relations_traversed[:30]:
+             parts = r.split(" -[")
+             if len(parts) == 2:
+                 src = parts[0].strip()
+                 rest = parts[1].split("]-> ")
+                 if len(rest) == 2:
+                     rtype = rest[0].strip()
+                     tgt = rest[1].split(": ")[0].strip()
+                     G.add_edge(src, tgt, relation=rtype)
+
+         if not G.nodes():
+             G.add_node("Query", entity_type="QUERY")
+             for e in gr_result.entities_found[:5]:
+                 G.add_node(e.get("name", "Entity"), entity_type=e.get("entity_type", "CONCEPT"))
+                 G.add_edge("Query", e.get("name", "Entity"), relation="FOUND")
+
+         pos = nx.spring_layout(G, k=2, iterations=50, seed=42)
+         colors_map = {"PERSON": "#FF6B6B", "ORGANIZATION": "#4ECDC4", "LOCATION": "#45B7D1",
+                       "EVENT": "#FFA07A", "DATE": "#98D8C8", "CONCEPT": "#AED6F1",
+                       "WORK": "#F9E79F", "PRODUCT": "#D7BDE2", "TECHNOLOGY": "#82E0AA", "QUERY": "#F39C12"}
+
+         edge_x, edge_y = [], []
+         for u, v in G.edges():
+             x0, y0 = pos[u]; x1, y1 = pos[v]
+             edge_x.extend([x0, x1, None]); edge_y.extend([y0, y1, None])
+
+         fig = go.Figure()
+         fig.add_trace(go.Scatter(x=edge_x, y=edge_y, mode='lines',
+                                  line=dict(width=1.5, color='#888'), hoverinfo='none', showlegend=False))
+         fig.add_trace(go.Scatter(
+             x=[pos[n][0] for n in G.nodes()], y=[pos[n][1] for n in G.nodes()],
+             mode='markers+text', text=list(G.nodes()), textposition="top center", textfont=dict(size=10),
+             marker=dict(size=[20 + G.degree(n)*5 for n in G.nodes()],
+                         color=[colors_map.get(G.nodes[n].get("entity_type", "CONCEPT"), "#AED6F1") for n in G.nodes()],
+                         line=dict(width=2, color='white')),
+             hovertext=[f"{n} ({G.nodes[n].get('entity_type', '')})" for n in G.nodes()],
+             hoverinfo='text', showlegend=False))
+         fig.update_layout(title=f"Knowledge Graph: {query[:50]}...", showlegend=False, hovermode='closest',
+                           xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
+                           yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
+                           height=500, margin=dict(b=20, l=20, r=20, t=40),
+                           paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)')
+
+         info = {"nodes": len(G.nodes()), "edges": len(G.edges()),
+                 "entities": len(gr_result.entities_found), "relations": len(gr_result.relations_traversed)}
+         stats = pd.DataFrame({"Metric": ["Nodes", "Edges", "Avg Degree", "Density", "Entities", "Relations"],
+                               "Value": [len(G.nodes()), len(G.edges()),
+                                         f"{sum(d for _, d in G.degree())/max(len(G.nodes()), 1):.1f}",
+                                         f"{nx.density(G):.3f}",
+                                         len(gr_result.entities_found), len(gr_result.relations_traversed)]})
+
+         explanation = orchestrator.explain_graphrag_reasoning(query, gr_result)
+         return fig, info, stats, explanation, gr_result.answer
+     except Exception as e:
+         empty = go.Figure()
+         empty.add_annotation(text=str(e), showarrow=False)
+         return empty, {}, pd.DataFrame(), str(e), ""
+
+
+ # ── Build Dashboard ───────────────────────────────────────
+
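+ # Assembles the 4-tab Blocks UI and wires each tab's controls to the callbacks above.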
+ def build_dashboard():
+     with gr.Blocks(title="GraphRAG Inference Dashboard") as demo:
+         gr.Markdown("""
+         # 🔍 GraphRAG Inference Hackathon — Comparison Dashboard
+         ### Proving that graphs make LLM inference faster, cheaper, and smarter
+         **Architecture:** TigerGraph (Graph) → Orchestration → LLM → Evaluation
+         | **Novelties:** 🧠 Adaptive Routing | 📋 Schema-Bounded Extraction | 🔗 Reasoning Paths | 🔑 Dual-Level Keywords
+         """)
+
+         with gr.Row():
+             init_btn = gr.Button("🚀 Initialize System", variant="primary", scale=2)
+             init_status = gr.Textbox(label="Status", interactive=False, scale=3)
+         init_btn.click(fn=initialize_system, outputs=init_status)
+
+         with gr.Tabs():
+             # ── Tab 1: Live Comparison ──────────────────
+             with gr.Tab("🔴 Live Query Comparison"):
+                 gr.Markdown("## Side-by-Side Pipeline Comparison")
+                 with gr.Row():
+                     query_input = gr.Textbox(label="Question", placeholder="e.g., Were Scott Derrickson and Ed Wood of the same nationality?", lines=2, scale=3)
+                     with gr.Column(scale=1):
+                         adaptive = gr.Checkbox(label="🧠 Adaptive Routing", value=True)
+                         topk = gr.Slider(1, 10, value=5, step=1, label="Top-K")
+                         hops_s = gr.Slider(1, 4, value=2, step=1, label="Hops")
+
+                 run_btn = gr.Button("▶ Run Comparison", variant="primary", size="lg")
+                 status = gr.Textbox(label="Status", interactive=False)
+                 routing = gr.Markdown(visible=True)
+
+                 with gr.Row():
+                     with gr.Column():
+                         gr.Markdown("### 🔵 Baseline RAG")
+                         b_ans = gr.Textbox(label="Answer", lines=5, interactive=False)
+                         with gr.Row():
+                             b_tok = gr.Number(label="Tokens", precision=0)
+                             b_lat = gr.Number(label="Latency (ms)", precision=1)
+                             b_cost = gr.Number(label="Cost ($)", precision=6)
+                     with gr.Column():
+                         gr.Markdown("### 🔴 GraphRAG")
+                         g_ans = gr.Textbox(label="Answer", lines=5, interactive=False)
+                         with gr.Row():
+                             g_tok = gr.Number(label="Tokens", precision=0)
+                             g_lat = gr.Number(label="Latency (ms)", precision=1)
+                             g_cost = gr.Number(label="Cost ($)", precision=6)
+
+                 chart = gr.Plot(label="Comparison")
+                 with gr.Accordion("📄 Retrieved Contexts", open=False):
+                     with gr.Row():
+                         b_ctx = gr.Markdown()
+                         g_ctx = gr.Markdown()
+                 with gr.Accordion("🕸️ Entities & Relations", open=False):
+                     ent_disp = gr.Markdown()
+
+                 run_btn.click(fn=run_live_comparison, inputs=[query_input, adaptive, topk, hops_s],
+                               outputs=[status, b_ans, g_ans, routing, b_tok, g_tok, b_lat, g_lat,
+                                        b_cost, g_cost, chart, b_ctx, g_ctx, ent_disp])
+                 gr.Examples(examples=[
+                     ["Were Scott Derrickson and Ed Wood of the same nationality?"],
+                     ["What government position was held by the woman who portrayed Nora Batty?"],
+                     ["Which magazine was started first, Arthur's Magazine or First for Women?"],
+                     ["Who was born first, Arthur Conan Doyle or Agatha Christie?"],
+                     ["What is the capital of the country where the Eiffel Tower is located?"]],
+                     inputs=query_input, label="📝 Example Questions")
+
+             # ── Tab 2: Batch Benchmark ──────────────────
+             with gr.Tab("📊 Batch Benchmark"):
+                 gr.Markdown("## Benchmark on HotpotQA")
+                 with gr.Row():
+                     n_samples = gr.Slider(10, 500, value=50, step=10, label="Samples")
+                     bk = gr.Slider(1, 10, value=5, step=1, label="Top-K")
+                     bh = gr.Slider(1, 4, value=2, step=1, label="Hops")
+                 bench_btn = gr.Button("🏃 Run Benchmark", variant="primary")
+                 bench_status = gr.Textbox(label="Status", interactive=False)
+                 summary_df = gr.Dataframe(label="Summary")
+                 with gr.Row():
+                     bar_chart = gr.Plot(label="Quality")
+                     radar_chart = gr.Plot(label="Radar")
+                 with gr.Accordion("📝 Full Report", open=False):
+                     report = gr.Textbox(lines=30, interactive=False)
+                 bench_btn.click(fn=run_batch_benchmark, inputs=[n_samples, bk, bh],
+                                 outputs=[bench_status, summary_df, bar_chart, radar_chart, report])
+
+             # ── Tab 3: Cost Analysis ────────────────────
+             with gr.Tab("💰 Cost Analysis"):
+                 gr.Markdown("## Cost & Token Analysis")
+                 with gr.Row():
+                     cq = gr.Slider(100, 100000, value=10000, step=100, label="Queries to Project")
+                     cm = gr.Dropdown(["gpt-4o-mini", "gpt-4o", "gpt-3.5-turbo", "claude-3-5-sonnet", "claude-3-haiku"],
+                                      value="gpt-4o-mini", label="Model")
+                 cost_btn = gr.Button("💵 Calculate", variant="primary")
+                 cost_df = gr.Dataframe(label="Breakdown")
+                 with gr.Row():
+                     cum_chart = gr.Plot(label="Cumulative Cost")
+                     tok_chart = gr.Plot(label="Token Distribution")
+                 cost_btn.click(fn=compute_cost_analysis, inputs=[cq, cm],
+                                outputs=[cost_df, cum_chart, tok_chart])
+
+             # ── Tab 4: Graph Explorer ───────────────────
+             with gr.Tab("🕸️ Graph Explorer"):
+                 gr.Markdown("## Interactive Knowledge Graph Explorer\n*Visualize how GraphRAG traverses the graph*")
+                 with gr.Row():
+                     gq = gr.Textbox(label="Query", placeholder="Enter a question...", scale=3)
+                     gd = gr.Slider(1, 4, value=2, step=1, label="Depth", scale=1)
+                     exp_btn = gr.Button("🔍 Explore", variant="primary", scale=1)
+                 graph_plot = gr.Plot(label="Knowledge Graph")
+                 with gr.Row():
+                     graph_stats = gr.Dataframe(label="Stats")
+                     node_info = gr.JSON(label="Details")
+                 with gr.Accordion("🧠 Reasoning Path", open=True):
+                     reasoning = gr.Markdown()
+                 graph_ans = gr.Textbox(label="GraphRAG Answer", interactive=False)
+                 exp_btn.click(fn=explore_graph, inputs=[gq, gd],
+                               outputs=[graph_plot, node_info, graph_stats, reasoning, graph_ans])
+                 gr.Examples(examples=[
+                     ["Who directed the movie starring Tom Hanks released in 1994?"],
+                     ["What is the relationship between Einstein and relativity?"],
+                     ["Which country hosted the 2024 Olympics and what is its capital?"]],
+                     inputs=gq, label="📝 Examples")
+
+         gr.Markdown("""
+         ---
+         **GraphRAG Inference Hackathon** by TigerGraph | TigerGraph + GPT-4o-mini + Gradio + RAGAS
+         **Novelties:** Adaptive Query Routing 🧠 | Schema-Bounded Extraction 📋 | Graph Reasoning Paths 🔗 | Dual-Level Keywords 🔑
+         """)
+     return demo
+
+
+ if __name__ == "__main__":
+     logging.basicConfig(level=logging.INFO)
+     demo = build_dashboard()
+     demo.launch(server_port=7860, share=False, show_error=True)