"use client"; import Link from "next/link"; export function HeroSection() { return (
    <section>
      <p>🏆 Hackathon 2025 · Powered by TigerGraph</p>

      <h1>
        Graphs make <span>LLM inference</span> smarter
      </h1>

      <p>
        A 3-pipeline system that routes queries through knowledge graphs when it
        matters: cutting tokens by 44%, speeding up multi-hop questions, and
        delivering measurably better answers.
      </p>

      {/* Placeholder hrefs */}
      <div>
        <Link href="/demo">Try Live Demo</Link>
        <Link href="/benchmarks">View Benchmarks</Link>
      </div>
      {/* Decorative code card */}
      <div>
        <p>graphrag_pipeline.py</p>
        <pre>
          <code>{`# AI Factory Model — 4-Layer Architecture

class GraphRAGPipeline:
    graph = TigerGraphCloud()
    router = AdaptiveQueryRouter()
    llm = UniversalLLM("gemini-2.5-flash")
    eval = RAGASEvaluator()

    def query(self, q):
        route = self.router.classify(q)
        context = self.graph.traverse(q)
        return self.llm.generate(context)`}</code>
        </pre>
      </div>
    </section>
  );
}
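
// Usage sketch, assuming this file lives at components/hero-section.tsx and the
// "@/*" path alias is configured; render the section from any App Router page:
//
//   import { HeroSection } from "@/components/hero-section";
//
//   export default function Home() {
//     return <HeroSection />;
//   }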