Home page: fix 8 stale/wrong facts
- StatsBar: '+21% F1 bridge queries' -> '-44% Token Reduction' (real benchmark)
- StatsBar: '55 unit tests' -> '50' (actual: 28 test_core + 22 test_novelties)
- HeroSection: 'dual-pipeline' -> '3-pipeline', '40%' -> '44%' tokens
- HowItWorks step 02: 'both pipelines' -> 'all 3 pipelines' with LLM-Only named
- HowItWorks step 04: 'Claude (or...)' -> 'Any of 12 LLM providers'; add Judge+BERTScore
- BentoShowcase: 'both pipelines' -> 'all 3 pipelines'; 'HotpotQA' -> 'Wikipedia science'
- CTASection: 'Baseline RAG and GraphRAG' -> 'all 3 pipelines'; add Judge+BERTScore
web/src/components/home/BentoShowcase.tsx
CHANGED
|
@@ -25,8 +25,8 @@ export function BentoShowcase() {
|
|
| 25 |
Live Playground
|
| 26 |
</h3>
|
| 27 |
<p style={{ color: "rgba(255,255,255,0.8)", fontSize: "0.9375rem", maxWidth: "400px" }}>
|
| 28 |
-
Ask any question and watch
|
| 29 |
-
Real-time
|
| 30 |
</p>
|
| 31 |
<div className="flex gap-2 mt-4">
|
| 32 |
<span className="badge" style={{ background: "rgba(255,255,255,0.2)", color: "white", fontSize: "0.6875rem" }}>12 LLM Providers</span>
|
|
@@ -43,7 +43,7 @@ export function BentoShowcase() {
|
|
| 43 |
Benchmarks
|
| 44 |
</h3>
|
| 45 |
<p className="body-sm" style={{ color: "var(--color-on-dark-soft)" }}>
|
| 46 |
-
Run
|
| 47 |
</p>
|
| 48 |
</div>
|
| 49 |
</Link>
|
|
|
|
| 25 |
Live Playground
|
| 26 |
</h3>
|
| 27 |
<p style={{ color: "rgba(255,255,255,0.8)", fontSize: "0.9375rem", maxWidth: "400px" }}>
|
| 28 |
+
Ask any science question and watch all 3 pipelines run simultaneously —
|
| 29 |
+
LLM-Only, Basic RAG, and GraphRAG. Real-time tokens, cost, and accuracy.
|
| 30 |
</p>
|
| 31 |
<div className="flex gap-2 mt-4">
|
| 32 |
<span className="badge" style={{ background: "rgba(255,255,255,0.2)", color: "white", fontSize: "0.6875rem" }}>12 LLM Providers</span>
|
|
|
|
| 43 |
Benchmarks
|
| 44 |
</h3>
|
| 45 |
<p className="body-sm" style={{ color: "var(--color-on-dark-soft)" }}>
|
| 46 |
+
Run Wikipedia science benchmarks with F1, LLM-Judge, BERTScore, and radar charts.
|
| 47 |
</p>
|
| 48 |
</div>
|
| 49 |
</Link>
|
web/src/components/home/CTASection.tsx
CHANGED
|
@@ -42,8 +42,8 @@ export function CTASection() {
|
|
| 42 |
maxWidth: "480px", margin: "0 auto 40px",
|
| 43 |
lineHeight: 1.6,
|
| 44 |
}}>
|
| 45 |
-
Run a live comparison
|
| 46 |
-
Measure
|
| 47 |
</p>
|
| 48 |
<div className="flex flex-wrap gap-4 justify-center">
|
| 49 |
<Link href="/playground" className="btn btn-primary btn-lg no-underline">
|
|
|
|
| 42 |
maxWidth: "480px", margin: "0 auto 40px",
|
| 43 |
lineHeight: 1.6,
|
| 44 |
}}>
|
| 45 |
+
Run a live comparison across all 3 pipelines — LLM-Only, Basic RAG, and GraphRAG.
|
| 46 |
+
Measure tokens, cost, LLM-Judge accuracy, and BERTScore — all in your browser.
|
| 47 |
</p>
|
| 48 |
<div className="flex flex-wrap gap-4 justify-center">
|
| 49 |
<Link href="/playground" className="btn btn-primary btn-lg no-underline">
|
web/src/components/home/HeroSection.tsx
CHANGED
|
@@ -36,8 +36,8 @@ export function HeroSection() {
|
|
| 36 |
</h1>
|
| 37 |
|
| 38 |
<p className="body-lg mb-10 animate-fade-in-up delay-200" style={{ maxWidth: "540px", color: "#3d3d3a" }}>
|
| 39 |
-
A
|
| 40 |
-
when it matters β cutting tokens by
|
| 41 |
and delivering measurably better answers.
|
| 42 |
</p>
|
| 43 |
|
|
|
|
| 36 |
</h1>
|
| 37 |
|
| 38 |
<p className="body-lg mb-10 animate-fade-in-up delay-200" style={{ maxWidth: "540px", color: "#3d3d3a" }}>
|
| 39 |
+
A 3-pipeline system that routes queries through knowledge graphs
|
| 40 |
+
when it matters β cutting tokens by 44%, speeding up multi-hop questions,
|
| 41 |
and delivering measurably better answers.
|
| 42 |
</p>
|
| 43 |
|
web/src/components/home/HowItWorks.tsx
CHANGED
|
@@ -11,7 +11,7 @@ const STEPS = [
|
|
| 11 |
{
|
| 12 |
number: "02",
|
| 13 |
title: "Dual Pipeline Activation",
|
| 14 |
-
description: "
|
| 15 |
detail: "Schema-bounded extraction ensures valid entities | GSQL multi-hop traversal",
|
| 16 |
color: "#0072CE",
|
| 17 |
},
|
|
@@ -25,7 +25,7 @@ const STEPS = [
|
|
| 25 |
{
|
| 26 |
number: "04",
|
| 27 |
title: "LLM Generation & Evaluation",
|
| 28 |
-
description: "
|
| 29 |
detail: "Cost tracking | Token counting | Latency measurement",
|
| 30 |
color: "#cc785c",
|
| 31 |
},
|
|
|
|
| 11 |
{
|
| 12 |
number: "02",
|
| 13 |
title: "Dual Pipeline Activation",
|
| 14 |
+
description: "All 3 pipelines execute simultaneously: LLM-Only (no retrieval), Baseline RAG (vector search → LLM), and GraphRAG (entity extraction → graph traversal → LLM).",
|
| 15 |
detail: "Schema-bounded extraction ensures valid entities | GSQL multi-hop traversal",
|
| 16 |
color: "#0072CE",
|
| 17 |
},
|
|
|
|
| 25 |
{
|
| 26 |
number: "04",
|
| 27 |
title: "LLM Generation & Evaluation",
|
| 28 |
+
description: "Any of 12 LLM providers (Gemini, GPT-4, Llama, etc.) generates answers. Evaluated with F1, Exact Match, LLM-as-a-Judge (PASS/FAIL), and BERTScore in real-time.",
|
| 29 |
detail: "Cost tracking | Token counting | Latency measurement",
|
| 30 |
color: "#cc785c",
|
| 31 |
},
|
web/src/components/home/StatsBar.tsx
CHANGED
|
@@ -2,11 +2,11 @@
|
|
| 2 |
|
| 3 |
export function StatsBar() {
|
| 4 |
const stats = [
|
| 5 |
-
{ value: "
|
| 6 |
{ value: "4", label: "AI Factory Layers", color: "#002B49" },
|
| 7 |
{ value: "12", label: "LLM Providers", color: "#0072CE" },
|
| 8 |
{ value: "5", label: "Novel Features", color: "#cc785c" },
|
| 9 |
-
{ value: "
|
| 10 |
];
|
| 11 |
|
| 12 |
return (
|
|
|
|
| 2 |
|
| 3 |
export function StatsBar() {
|
| 4 |
const stats = [
|
| 5 |
+
{ value: "-44%", label: "Token Reduction vs RAG", color: "#FF6B00" },
|
| 6 |
{ value: "4", label: "AI Factory Layers", color: "#002B49" },
|
| 7 |
{ value: "12", label: "LLM Providers", color: "#0072CE" },
|
| 8 |
{ value: "5", label: "Novel Features", color: "#cc785c" },
|
| 9 |
+
{ value: "50", label: "Unit Tests", color: "#5db8a6" },
|
| 10 |
];
|
| 11 |
|
| 12 |
return (
|