// solvox/src/renderer/pages/VoicePage.tsx
// Commit 9ff7e0c (muthuk1): "Final: +ContactsPage +ScanPage +Sparklines, types.ts synced,
// TS 0 errors, Coinbase design, complete README"
import React, { useState, useRef, useEffect } from 'react';
import { PipelineTrace } from '../components/ui/index';
// Props supplied by the page shell. `aiStatus` is the QVAC module-status object —
// the fields llm/transcription/tts/embed/translation/ocr are read below (see `mods`).
// NOTE(review): typed `any`; presumably a concrete AiStatus type exists in types.ts — confirm and tighten.
interface Props { aiStatus: any; }
// One chat-transcript entry. `role: 'system'` is used for local notices (mic denied,
// processing errors); 'user'/'assistant' carry the conversation itself.
// `pipeline` feeds <PipelineTrace/> and `actions` is rendered as tool badges (only `.tool`
// is read). Both come back from window.solvox.ai calls whose shapes are not visible in
// this file, hence any[] — TODO(review): tighten once the IPC result types are exported.
interface Msg { id: string; role: 'user' | 'assistant' | 'system'; text: string; pipeline?: any[]; actions?: any[]; ts: Date; }
/** Starter prompts offered while the conversation is still fresh (≤ 2 messages). */
const SUGGESTIONS = [
  'What is my balance?',
  'Send 5 SOL to alice.sol',
  'Show recent transactions',
  'Help me understand gas fees',
];
/**
 * Voice AI Assistant page: chat transcript plus push-to-talk recording.
 *
 * Voice path: getUserMedia → MediaRecorder (audio/webm) → window.solvox.ai.processVoice,
 * which returns a transcription, an agent response, optional pipeline/action metadata
 * and optional WAV reply audio. Text path: window.solvox.ai.chat. When window.solvox
 * is absent (dev mode) canned fallbacks are shown instead.
 *
 * Fixes vs. the previous revision:
 *  - message ids now include a counter (Date.now()-only ids collided → duplicate React keys
 *    when processVoice added the user + assistant messages within the same millisecond);
 *  - the per-recording AudioContext is closed after use (it was leaked);
 *  - unmount cleanup cancels the rAF loop, stops mic tracks and closes the AudioContext;
 *  - setProcessing(false) moved into finally so the spinner can never get stuck;
 *  - catch clauses tolerate non-Error throws;
 *  - reply-audio object URLs are revoked when playback ends.
 */
export default function VoicePage({ aiStatus }: Props) {
  const [msgs, setMsgs] = useState<Msg[]>([
    { id: '0', role: 'assistant', text: "I'm your SolVox AI assistant — running 100% locally via 6 QVAC modules. Ask me to send tokens, check balances, search transactions, or explain anything about Solana.", pipeline: [], actions: [], ts: new Date() },
  ]);
  const [input, setInput] = useState('');
  const [recording, setRecording] = useState(false);
  const [processing, setProcessing] = useState(false);
  const [wave, setWave] = useState<number[]>(new Array(40).fill(1)); // 40 bars, 1px minimum height
  const recRef = useRef<MediaRecorder | null>(null);
  const chunks = useRef<Blob[]>([]);
  const endRef = useRef<HTMLDivElement>(null);
  const analyser = useRef<AnalyserNode | null>(null);
  const raf = useRef<number | null>(null);
  const audioCtx = useRef<AudioContext | null>(null); // held so it can be close()d — was leaked before
  const seq = useRef(0); // monotonic suffix → unique message ids even within one millisecond

  // Keep the newest message scrolled into view.
  useEffect(() => { endRef.current?.scrollIntoView({ behavior: 'smooth' }); }, [msgs]);

  // FIX: release audio resources if the page unmounts mid-recording.
  // Stopping the tracks (rather than rec.stop()) deliberately avoids firing onstop,
  // which would setState on an unmounted component.
  useEffect(() => () => {
    if (raf.current) cancelAnimationFrame(raf.current);
    recRef.current?.stream.getTracks().forEach(t => t.stop());
    audioCtx.current?.close().catch(() => {});
  }, []);

  /**
   * Append a message to the transcript.
   * FIX: id combines timestamp + counter; the old Date.now()-only id produced
   * duplicate React keys when two messages were added in the same millisecond.
   */
  const add = (role: Msg['role'], text: string, pipeline?: any[], actions?: any[]) => {
    const id = `${Date.now().toString(36)}-${seq.current++}`;
    setMsgs(p => [...p, { id, role, text, pipeline, actions, ts: new Date() }]);
  };

  /** Begin capture: mic stream, analyser-driven waveform, and a MediaRecorder. */
  const startRec = async () => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: { sampleRate: 16000, channelCount: 1, echoCancellation: true, noiseSuppression: true } });

      // Drive the 40-bar waveform from live frequency data.
      const ctx = new AudioContext();
      audioCtx.current = ctx;
      const src = ctx.createMediaStreamSource(stream);
      const a = ctx.createAnalyser();
      a.fftSize = 128; // 64 frequency bins; the lowest 40 are displayed
      src.connect(a);
      analyser.current = a;
      const tick = () => {
        if (!analyser.current) return;
        const d = new Uint8Array(analyser.current.frequencyBinCount);
        analyser.current.getByteFrequencyData(d);
        setWave(Array.from(d).slice(0, 40).map(v => Math.max(1, v / 8)));
        raf.current = requestAnimationFrame(tick);
      };
      tick();

      const rec = new MediaRecorder(stream, { mimeType: 'audio/webm' });
      chunks.current = [];
      rec.ondataavailable = e => { if (e.data.size > 0) chunks.current.push(e.data); };
      rec.onstop = async () => {
        stream.getTracks().forEach(t => t.stop());
        if (raf.current) cancelAnimationFrame(raf.current);
        analyser.current = null;
        // FIX: close the AudioContext — one was created per recording and never released.
        audioCtx.current?.close().catch(() => {});
        audioCtx.current = null;
        setWave(new Array(40).fill(1));
        const b = new Blob(chunks.current, { type: 'audio/webm' });
        processVoice(await b.arrayBuffer());
      };
      recRef.current = rec;
      rec.start();
      setRecording(true);
    } catch {
      add('system', 'Microphone access denied.');
    }
  };

  /** Stop an in-flight recording; onstop above handles teardown + processing. */
  const stopRec = () => { if (recRef.current && recording) { recRef.current.stop(); setRecording(false); } };

  /** Ship recorded audio to the local AI pipeline and append the results. */
  const processVoice = async (data: ArrayBuffer) => {
    setProcessing(true);
    try {
      if (window.solvox) {
        const r = await window.solvox.ai.processVoice(data);
        if (r.success) {
          add('user', r.transcription || '[voice]');
          add('assistant', r.agentResult?.response || 'Done.', r.pipelineSteps, r.agentResult?.actions);
          // Spoken reply, best-effort — playback failure is non-fatal.
          // FIX: revoke the object URL once playback ends (was never released).
          if (r.responseAudio) {
            const u = URL.createObjectURL(new Blob([r.responseAudio], { type: 'audio/wav' }));
            const audio = new Audio(u);
            audio.onended = () => URL.revokeObjectURL(u);
            audio.play().catch(() => {});
          }
        } else add('system', r.error || 'Voice processing failed');
      } else {
        add('user', '[voice — dev]');
        add('assistant', 'QVAC models needed. Type commands instead.');
      }
    } catch (e) {
      add('system', e instanceof Error ? e.message : String(e));
    } finally {
      // FIX: previously skipped on a throw; finally guarantees the spinner clears.
      setProcessing(false);
    }
  };

  /** Send the typed command through the local chat pipeline. */
  const handleSend = async () => {
    if (!input.trim()) return;
    const text = input.trim();
    setInput('');
    add('user', text);
    setProcessing(true);
    try {
      if (window.solvox) {
        const r = await window.solvox.ai.chat(text);
        if (r.success) add('assistant', r.response || '', r.pipelineSteps, r.actions);
        else add('assistant', r.error || 'Could not process.');
      } else add('assistant', `[Dev] "${text}" — needs QVAC models.`);
    } catch (e) {
      add('system', e instanceof Error ? e.message : String(e));
    } finally {
      setProcessing(false);
    }
  };

  // Count of online QVAC modules; drives the status pill (≥4 → ONLINE, ≥1 → PARTIAL).
  const mods = aiStatus ? [aiStatus.llm, aiStatus.transcription, aiStatus.tts, aiStatus.embed, aiStatus.translation, aiStatus.ocr].filter(Boolean).length : 0;

  return (
    <div className="flex flex-col h-full max-w-content mx-auto px-8 py-6">
      {/* Header */}
      <div className="flex items-center justify-between mb-4">
        <div>
          <h2 className="text-title-lg display-text text-ink">Voice AI Assistant</h2>
          <p className="text-body-sm text-body">Powered by 6 QVAC packages · All inference local</p>
        </div>
        <span className={`badge-pill ${mods >= 4 ? 'badge-pill-green' : mods > 0 ? 'badge-pill-blue' : ''}`}>
          {mods >= 4 ? 'AI ONLINE' : mods > 0 ? 'PARTIAL' : 'LOADING'}
        </span>
      </div>
      {/* Chat transcript */}
      <div className="flex-1 overflow-y-auto card mb-4 space-y-4" style={{ padding: '24px' }}>
        {msgs.map(m => (
          <div key={m.id} className={`flex ${m.role === 'user' ? 'justify-end' : 'justify-start'} page-enter`}>
            <div className={`max-w-[75%] ${
              m.role === 'user' ? 'bg-primary text-on-primary rounded-xl rounded-br-sm px-4 py-3' :
              m.role === 'system' ? 'bg-surface-soft text-muted border border-hairline rounded-xl px-4 py-3' :
              'bg-surface-soft text-ink rounded-xl rounded-bl-sm px-4 py-3'
            }`}>
              {m.role === 'assistant' && <div className="text-caption-strong text-primary mb-1">SolVox AI</div>}
              <p className="text-body-sm whitespace-pre-wrap">{m.text}</p>
              {m.actions && m.actions.length > 0 && (
                <div className="flex flex-wrap gap-1 mt-2">
                  {m.actions.map((a: any, i: number) => (
                    <span key={i} className="badge-pill-blue badge-pill text-[10px]">{a.tool}</span>
                  ))}
                </div>
              )}
              {m.pipeline && m.pipeline.length > 0 && <PipelineTrace steps={m.pipeline} />}
              <div className="text-caption text-muted-soft mt-1.5">{m.ts.toLocaleTimeString()}</div>
            </div>
          </div>
        ))}
        {processing && (
          <div className="flex justify-start">
            <div className="bg-surface-soft rounded-xl px-4 py-3 flex items-center gap-2">
              <div className="w-2 h-2 rounded-full bg-primary animate-pulse" />
              <span className="text-body-sm text-muted">Processing locally…</span>
            </div>
          </div>
        )}
        <div ref={endRef} />
      </div>
      {/* Suggestion chips (only while the conversation is fresh) */}
      {msgs.length <= 2 && !recording && (
        <div className="flex flex-wrap gap-2 mb-3">
          {SUGGESTIONS.map(s => (
            <button key={s} onClick={() => setInput(s)} className="btn-secondary text-body-sm py-1.5 px-3">{s}</button>
          ))}
        </div>
      )}
      {/* Live waveform while recording */}
      {recording && (
        <div className="bg-surface-soft rounded-xl p-3 mb-3 flex items-center justify-center gap-px h-12">
          {wave.map((h, i) => (
            <div key={i} className="w-[3px] rounded-full bg-primary transition-all duration-75" style={{ height: `${h}px`, opacity: 0.3 + h / 40 }} />
          ))}
        </div>
      )}
      {/* Input row: push-to-talk mic + text field */}
      <div className="flex items-center gap-3">
        <button onMouseDown={startRec} onMouseUp={stopRec} onMouseLeave={stopRec} onTouchStart={startRec} onTouchEnd={stopRec} disabled={processing}
          className={`w-11 h-11 rounded-pill flex items-center justify-center transition-colors ${recording ? 'bg-semantic-down' : 'bg-primary'} disabled:opacity-40`}>
          <svg width="18" height="18" viewBox="0 0 24 24" fill="white">
            {recording ? <rect x="6" y="6" width="12" height="12" rx="2" /> : <><path d="M12 1a3 3 0 0 0-3 3v8a3 3 0 0 0 6 0V4a3 3 0 0 0-3-3z"/><path d="M19 10v2a7 7 0 0 1-14 0v-2" fill="none" stroke="white" strokeWidth="2" strokeLinecap="round"/><line x1="12" y1="19" x2="12" y2="23" fill="none" stroke="white" strokeWidth="2" strokeLinecap="round"/></>}
          </svg>
        </button>
        <div className="flex-1 relative">
          <input value={input} onChange={e => setInput(e.target.value)} onKeyDown={e => e.key === 'Enter' && handleSend()}
            placeholder={recording ? 'Listening…' : 'Type a command or hold mic…'} className="input-field pr-20" disabled={processing || recording} />
          <button onClick={handleSend} disabled={!input.trim() || processing} className="absolute right-1.5 top-1/2 -translate-y-1/2 btn-text text-body-sm disabled:opacity-20">Send</button>
        </div>
      </div>
    </div>
  );
}