import { useState, useCallback, Suspense, lazy, forwardRef } from "react";
import ReactMarkdown from "react-markdown";
import remarkGfm from "remark-gfm";
import remarkMath from "remark-math";
import rehypeKatex from "rehype-katex";
import "katex/dist/katex.min.css";
import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";
import { oneDark } from "react-syntax-highlighter/dist/esm/styles/prism";
import SourceCard from "./SourceCard";

// Lazy-load MermaidBlock — deferred so mermaid.js doesn't bloat the initial bundle.
const MermaidBlock = lazy(() => import("./MermaidBlock"));

// ReactMarkdown renders fenced code blocks as <pre><code className="language-...">...</code></pre>.
// If we override only `code`, ReactMarkdown wraps the whole thing in a <p>,
// giving <p><pre>...</pre></p> — invalid HTML (pre can't be inside p).
//
// Fix: override `pre` to render just its children (no wrapper), so
// SyntaxHighlighter's own <pre> is the only one. Then in `code` we check
// whether it has a language class (block code) or not (inline code).
const mdComponents = {
// Wrap tables in a scrollable container so wide tables don't wrap cells
table({ children }) {
    // assumed wrapper: a plain overflow-x div (original class names are not recoverable)
    return (
      <div style={{ overflowX: "auto" }}>
        <table>{children}</table>
      </div>
    );
},
  // Strip the <pre> wrapper — SyntaxHighlighter adds its own <pre>
pre({ children }) {
    return <>{children}</>;
},
code({ className, children, ...props }) {
const lang = /language-(\w+)/.exec(className || "")?.[1];
if (lang === "diagram" || lang === "mermaid") {
// Agent drew a diagram — render as SVG via mermaid.js.
// We intercept both "diagram" (our custom tag) and "mermaid" (model's natural tag).
      return (
        <Suspense fallback={<div>Rendering diagram…</div>}>
          {/* prop name below is assumed; MermaidBlock receives the raw diagram source */}
          <MermaidBlock code={String(children)} />
        </Suspense>
      );
}
if (lang) {
// Block code with a language tag → syntax-highlighted
      return (
        <SyntaxHighlighter language={lang} style={oneDark}>
          {String(children).replace(/\n$/, "")}
        </SyntaxHighlighter>
      );
}
// Inline code → plain
    return <code className={className} {...props}>{children}</code>;
},
};
// Thought bubble — shows the LLM's reasoning before a tool call.
// isActive = this is the currently-streaming thought (last item while streaming).
// Past thoughts (agent already moved on) collapse to a one-liner — click to expand.
function AgentThought({ text, isActive }) {
const [expanded, setExpanded] = useState(false);
// Shared node — same ○ → structure as tool steps so both rows align
  // NOTE: markup below is reconstructed; class names other than agent-thought-body are assumed.
  const node = (
    <span className="agent-step-node" aria-hidden="true">○ →</span>
  );
// While streaming, show the full thought text (no collapse, no chevron)
if (isActive) {
    return (
      <div className="agent-thought">
        {node}
        {/* agent-thought-body mirrors agent-step-body margin so text aligns with tool step content */}
        <div className="agent-thought-body">{text}</div>
      </div>
    );
}
// Past thought — one-line collapsed by default, click to expand
const preview = text.length > 120 ? text.slice(0, 120) + "…" : text;
  return (
    <div className="agent-thought" onClick={() => setExpanded(v => !v)}>
      {node}
      <div className="agent-thought-body">{expanded ? text : preview}</div>
      <span className="agent-thought-chevron">{expanded ? "▾" : "▸"}</span>
    </div>
  );
}
// Convert tool+input into a short human-readable label shown in the step header.
// Reads like a sentence fragment so the trace feels like watching the agent think,
// not like reading a JSON dump.
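// Example (the filepath is just an illustrative value):
//   formatStepQuery("get_file_chunk", { filepath: "app/db.py", start_line: 10, end_line: 42 })
//   → "app/db.py (L10–42)"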
function formatStepQuery(tool, input) {
if (!input) return "";
switch (tool) {
case "search_code": return input.query || JSON.stringify(input);
case "search_symbol": return input.symbol_name || JSON.stringify(input);
case "list_files": return input.path ? `${input.repo}/${input.path}` : (input.repo || JSON.stringify(input));
case "find_callers": return input.function_name || JSON.stringify(input);
case "get_file_chunk": return input.filepath
? `${input.filepath} (L${input.start_line}–${input.end_line})`
: JSON.stringify(input);
case "read_file": return input.filepath || JSON.stringify(input);
case "note": return input.key ? `${input.key}: ${input.value}` : JSON.stringify(input);
case "recall_notes": return "checking notes";
case "trace_calls": return input.symbol_name || JSON.stringify(input);
default: return input.query || input.name || JSON.stringify(input);
}
}
// Individual agent step — renders as a node in the connected timeline chain.
//
// Collapsed by default once a step is no longer the active one.
// isActive = this step is currently executing (isLast && streaming).
// Clicking a completed (non-active) step toggles its output open/closed.
function AgentStep({ step, isLast, icon, streaming }) {
const isActive = isLast && streaming;
const isPending = !step.output && isActive;
// manualExpand lets users re-open a completed step; resets when step becomes active again
const [manualExpand, setManualExpand] = useState(false);
const showOutput = isActive || manualExpand;
const isLong = step.output && step.output.length > 300;
const [outputExpanded, setOutputExpanded] = useState(false);
const toggle = () => {
if (!isActive) setManualExpand(v => !v);
};
  // Markup below is reconstructed: class names, the pending spinner, the chevron glyphs,
  // and the show-more/show-less button labels are assumptions; the logic is original.
  return (
    <div className="agent-step">
      {/* Node dot on the vertical line + arrow connector */}
      <span className="agent-step-node" aria-hidden="true">○ →</span>
      {/* Step body */}
      <div className="agent-step-body">
        <div className="agent-step-header" onClick={toggle}>
          <span className="agent-step-icon">{icon}</span>
          <span className="agent-step-tool">{step.tool}</span>
          <span className="agent-step-query">{formatStepQuery(step.tool, step.input)}</span>
          {isPending && <span className="agent-step-spinner" />}
          {!isActive && step.output && (
            <span className="agent-step-chevron">{showOutput ? "▾" : "▸"}</span>
          )}
        </div>
        {showOutput && step.output && (
          <>
            <pre
              className={`agent-step-output${isLong && !outputExpanded ? " clamped" : ""}`}
              onClick={() => isLong && !outputExpanded && setOutputExpanded(true)}
            >
              {step.output}
            </pre>
            {isLong && !outputExpanded && (
              <button onClick={() => setOutputExpanded(true)}>Show more</button>
            )}
            {isLong && outputExpanded && (
              <button onClick={() => setOutputExpanded(false)}>Show less</button>
            )}
          </>
        )}
      </div>
    </div>
  );
}
// ToolCallTrace shows the agent's reasoning steps as a connected timeline —
// visually similar to how Claude Code shows "Agent → Bash → Read" with
// vertical lines connecting each step.
//
// DURING streaming: always expanded so user can watch the agent think live.
// AFTER completion: collapsible via the toggle header.
function ToolCallTrace({ steps, streaming, iterations, model }) {
const [expanded, setExpanded] = useState(true);
if (!steps || steps.length === 0) return null;
// Tool name → icon SVG for clean visual scanning (no emoji)
const toolIcon = {
search_code: ,
search_symbol: ,
list_files: ,
get_file_chunk: ,
read_file: ,
find_callers: ,
note: ,
recall_notes: ,
trace_calls: ,
};
const defaultIcon = ;
// Compute the index of the last non-thought step so we can pass isLast correctly
const lastToolIdx = steps.reduce((acc, s, i) => s.type !== "thought" ? i : acc, -1);
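  // e.g. steps = [thought, search_code, thought] → lastToolIdx = 1, so the search_code step
  // still counts as the last tool step even though a trailing thought follows it.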
  const stepsEl = (
    // Wrapper and connector-line markup below is assumed; the original class names were lost.
    <div className="agent-trace-steps">
      {/* Vertical connector line running the full height */}
      <div className="agent-trace-line" />
{steps.map((step, i) => {
if (step.type === "thought") {
// A thought is "active" (shown in full) only while it's the last item
// and the agent is still streaming — once the agent emits a tool call
// after it, the thought is "past" and collapses to a one-liner.
const isActiveThought = streaming && i === steps.length - 1;
          return <AgentThought key={i} text={step.text} isActive={isActiveThought} />;
}
        return (
          <AgentStep
            key={i}
            step={step}
            isLast={i === lastToolIdx}
            icon={toolIcon[step.tool] || defaultIcon}
            streaming={streaming}
          />
        );
})}
    </div>
  );
  return (
    // Header markup is reconstructed from the comments here and in Message
    // ("Agent · N iterations · model"); class names and exact layout are assumed.
    <div className="agent-trace">
      <div
        className="agent-trace-header"
        onClick={() => { if (!streaming) setExpanded(v => !v); }}
      >
        ✦ Agent
        {iterations ? ` · ${iterations} iteration${iterations === 1 ? "" : "s"}` : ""}
        {model ? ` · ${model.split("/").pop()}` : ""}
      </div>
      {expanded && stepsEl}
{/* When collapsed, show the first thought as a one-line summary so users can still see the agent's reasoning intent */}
{!expanded && !streaming && (() => {
const firstThought = steps.find(s => s.type === "thought");
if (!firstThought) return null;
        return (
          <div className="agent-trace-summary">
            "{firstThought.text.length > 120 ? firstThought.text.slice(0, 120) + "…" : firstThought.text}"
          </div>
        );
})()}
    </div>
  );
}
// ConfidenceBadge — rendered after model-based grading completes.
// high = all claims confirmed in sources → green check (shown in pipeline bar only)
// medium = mostly supported, minor extrapolation → amber warning
// low = claims not backed by sources → red warning
const CONFIDENCE_CONFIG = {
high: { color: "#10b981", bg: "rgba(16,185,129,0.10)", icon: "✓", label: "High confidence" },
medium: { color: "#f59e0b", bg: "rgba(245,158,11,0.10)", icon: "◐", label: "Medium confidence" },
low: { color: "#ef4444", bg: "rgba(239,68,68,0.10)", icon: "⚠", label: "Low confidence" },
};
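// Expected grade shape (inferred from usage here and in Message below):
//   { confidence: "high" | "medium" | "low" | "unknown", note?: string }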
function ConfidenceBadge({ grade }) {
const cfg = CONFIDENCE_CONFIG[grade.confidence] || CONFIDENCE_CONFIG.medium;
  // Badge markup reconstructed; class names are assumed, colors come from CONFIDENCE_CONFIG.
  return (
    <span className="confidence-badge" style={{ color: cfg.color, background: cfg.bg }}>
      {cfg.icon} {cfg.label}
      {grade.note && (
        <span className="confidence-note"> — {grade.note}</span>
      )}
    </span>
  );
}
// Copy-answer button — appears on hover over the assistant message.
// Copies the raw markdown text so developers can paste it into docs/code.
function CopyAnswerButton({ content }) {
const [copied, setCopied] = useState(false);
const handleCopy = useCallback(() => {
navigator.clipboard.writeText(content).then(() => {
setCopied(true);
setTimeout(() => setCopied(false), 1800);
});
}, [content]);
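  // Note: navigator.clipboard is only available in secure contexts (HTTPS or localhost);
  // older browsers would need a fallback such as a hidden textarea + execCommand("copy").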
  // Button label and title text below are assumed; the original markup was lost.
  return (
    <button className="copy-answer-btn" onClick={handleCopy} title="Copy answer as markdown">
      {copied ? "Copied!" : "Copy"}
    </button>
  );
}
const Message = forwardRef(function Message({ msg, onDiagramThis, onRetry, showRepo = false }, ref) {
const isUser = msg.role === "user";
return (
{isUser ? (
{msg.content}
) : (
<>
{/* Assistant avatar — ✦ for agent responses, code icon for RAG.
This matches the ✦ badge on agent sessions in the sidebar,
making the visual language consistent: ✦ = agent mode. */}
{/* All assistant content in a column wrapper */}
            {/* Mode tag — shown on RAG responses only; agent responses already get the
                ToolCallTrace header ("Agent · N iterations · model"), so a second label
                would be redundant. Uses msg.mode (set explicitly at creation, never mutated)
                so async callbacks updating queryType/phase/model can never flip the label
                to the wrong mode. */}
{msg.mode === "rag" && (
◎
RAG
{msg.queryType && (
· {msg.queryType}
)}
{msg.model && (
· {msg.model.split("/").pop()}
)}
)}
{/* Agent reasoning trace */}
            {msg.toolCalls && msg.toolCalls.length > 0 && (
              <ToolCallTrace
                steps={msg.toolCalls}
                streaming={msg.streaming}
                iterations={msg.iterations}
                model={msg.model}
              />
            )}
{/* "Thinking…" shown before first tool call in agent mode */}
{msg.streaming && msg.currentTool === null && !msg.content && (!msg.toolCalls || msg.toolCalls.length === 0) && !msg.phase && (
Thinking…
)}
{/* RAG retrieval phase indicator — makes the invisible retrieval step visible.
"searching" = waiting for vector search to return sources.
"generating" = sources received, LLM is now streaming the answer. */}
{msg.streaming && msg.phase && (
{msg.phase === "searching"
? "Searching code…"
: `Found ${msg.sourceCount ?? "?"} source${msg.sourceCount !== 1 ? "s" : ""} · Generating answer…`
}
)}
{/* Rate-limit countdown banner — shown instead of a hard error */}
{msg.rateLimited && (
{msg.content}
{onRetry && msg.retryQuestion && (
)}
)}
{/* Answer bubble */}
            {/* markup and plugin wiring below are assumed from the imports and mdComponents above */}
            <div className="answer-bubble">
              <ReactMarkdown
                remarkPlugins={[remarkGfm, remarkMath]}
                rehypePlugins={[rehypeKatex]}
                components={mdComponents}
              >
                {msg.content || " "}
              </ReactMarkdown>
            </div>
            {/* Show cursor whenever streaming, not just when no tool active (cursor glyph/class assumed) */}
            {msg.streaming && <span className="streaming-cursor" aria-hidden="true">▋</span>}
{/* Copy-answer button — visible on hover; lets devs paste the answer */}
            {!msg.streaming && msg.content && <CopyAnswerButton content={msg.content} />}
{/* Pipeline provenance — shows every retrieval stage that fired for this answer.
Positioned HERE (before sources) so it's immediately visible after the answer,
not buried below N source cards. Quality features only shown when they ran. */}
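            {/* Expected msg.pipeline shape (inferred from the reads below):
                { hyde?: boolean, expanded?: number, reranker?: string, parent_docs?: number } */}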
{!msg.streaming && msg.queryType && !msg.iterations && (
{msg.pipeline?.hyde && (
<>
HyDE
→
>
)}
{msg.pipeline?.expanded > 0 && (
<>
+{msg.pipeline.expanded} expansions
→
>
)}
{msg.queryType} search
→
{msg.pipeline?.reranker === "cohere" ? "cohere re-ranked" : "re-ranked"}
→
{msg.pipeline?.parent_docs > 0 && (
<>
↕ {msg.pipeline.parent_docs} expanded
→
>
)}
{msg.sources?.length ?? 0} source{(msg.sources?.length ?? 0) !== 1 ? "s" : ""}
→
generated
{msg.model && (
<>
·
{msg.model.split("/").pop()}
>
)}
{msg.grade && msg.grade.confidence !== "unknown" && (
<>
→
{msg.grade.confidence === "high" ? "✓" : msg.grade.confidence === "low" ? "⚠" : "◐"} {msg.grade.confidence}
>
)}
)}
{/* Badges + Sources — query type shown as sources header for context */}
{/* (agent iteration count is shown in the ToolCallTrace header above) */}
{msg.sources && msg.sources.length > 0 && !msg.streaming && (
{msg.sources.length} source{msg.sources.length > 1 ? "s" : ""}
{msg.queryType && !msg.iterations && (
{msg.queryType}
)}
            {msg.sources.map((s, i) => (
              // SourceCard prop names (source, showRepo) are assumed
              <SourceCard key={i} source={s} showRepo={showRepo} />
            ))}
{/* "Diagram this →" button — switches to diagram tab with focused-files context */}
{onDiagramThis && (
)}
)}
{/* Query type badge when no sources (e.g. factual answer with no retrieved chunks) */}
{!msg.streaming && !msg.iterations && msg.queryType && !(msg.sources?.length > 0) && (
{msg.queryType}
)}
{/* Standalone confidence badge for medium/low — shows the note text */}
{msg.grade && msg.grade.confidence !== "unknown" && msg.grade.confidence !== "high" && (
              <ConfidenceBadge grade={msg.grade} />
            )}
>
)}
);
});
export default Message;