Álvaro Valenzuela Valdes
feat: implement Llama 3.2 Vision support in chatbot for image analysis
46928cd | "use client"; | |
| import { useState, useRef, useEffect } from "react"; | |
| import type { Tender, CompanyProfile } from "../lib/types"; | |
| import { uploadDocument, getAPIBase } from "../lib/api"; | |
// One chat transcript entry. `agent` holds the display name of the speaking
// assistant persona (or "User" for user turns); it is optional because
// attachment-notice entries are appended without it.
type Message = {
  role: "user" | "assistant";
  content: string;
  agent?: string;
};
// Props injected by the parent view: the tender under discussion and the
// company profile; both are forwarded verbatim in the /api/chat payload.
type Props = {
  tender: Tender;
  companyProfile: CompanyProfile;
};
// Selectable assistant personas. `id` is the value sent to the backend as the
// `agent` field; `avatar` is rendered in the header; `color` is a Tailwind
// text class (NOTE(review): "text-cyan" is not a stock Tailwind utility —
// confirm it is defined in the project's Tailwind config).
const agents = [
  { id: "legal", name: "Dra. Legal", avatar: "⚖️", color: "text-amber-400" },
  { id: "tech", name: "Ing. Tech", avatar: "👨💻", color: "text-cyan" },
  { id: "risk", name: "Sra. Estrategia", avatar: "🕵️♀️", color: "text-purple-400" },
];
// Human-readable model labels. The selected label is sent verbatim as the
// `model` field of the /api/chat request, so each entry must match what the
// backend's model router expects.
const models = [
  "Llama-3.3-70B (Groq)",
  "Llama-3.1-8B (Groq)",
  "Llama-3.2-11B-Vision (Groq)",
  "Gemini 2.5 Flash",
  "Qwen-2.5 (Featherless)",
];
/**
 * Multi-persona chat panel for a tender: lets the user pick an agent persona
 * and model, attach documents (text extracted server-side) or images (sent
 * inline as base64 for vision models), dictate via the Web Speech API, and
 * streams assistant replies with a word-by-word typewriter effect.
 */
export default function AgentChat({ tender, companyProfile }: Props) {
  const [messages, setMessages] = useState<Message[]>([]);
  const [input, setInput] = useState("");
  const [selectedAgent, setSelectedAgent] = useState(agents[0]);
  const [selectedModel, setSelectedModel] = useState(models[0]);
  const [isLoading, setIsLoading] = useState(false);     // /api/chat request in flight
  const [isTyping, setIsTyping] = useState(false);       // typewriter animation running
  const [isUploading, setIsUploading] = useState(false); // file upload / image encode in flight
  const [isListening, setIsListening] = useState(false); // speech recognition active
  const [contextText, setContextText] = useState("");    // accumulated text extracted from uploaded docs
  const [attachedFile, setAttachedFile] = useState<File | null>(null); // pending image attachment
  const scrollRef = useRef<HTMLDivElement>(null);
  const fileInputRef = useRef<HTMLInputElement>(null);
  // FIX: keep a handle on the typewriter interval so it can be cancelled on
  // unmount; previously it leaked and kept calling setState after unmount.
  const typingIntervalRef = useRef<ReturnType<typeof setInterval> | null>(null);

  // Stop any in-flight typewriter animation when the component unmounts.
  useEffect(() => {
    return () => {
      if (typingIntervalRef.current !== null) {
        clearInterval(typingIntervalRef.current);
      }
    };
  }, []);

  /**
   * Start browser speech recognition (webkit-prefixed where needed) and put
   * the final transcript into the input box.
   */
  const startSpeechRecognition = () => {
    // The Web Speech API has no TS lib types in some configs, hence `any`.
    const SpeechRecognition = (window as any).SpeechRecognition || (window as any).webkitSpeechRecognition;
    if (!SpeechRecognition) {
      alert("Speech recognition not supported in this browser.");
      return;
    }
    const recognition = new SpeechRecognition();
    recognition.lang = "es-CL";
    recognition.interimResults = false;
    recognition.onstart = () => setIsListening(true);
    recognition.onend = () => setIsListening(false);
    // FIX: also reset the flag on error so the mic button cannot get stuck
    // in the "listening" state if recognition fails.
    recognition.onerror = () => setIsListening(false);
    recognition.onresult = (event: any) => {
      const transcript = event.results[0][0].transcript;
      setInput(transcript);
      // Optional: Auto-send after voice command
      // handleSend(transcript);
    };
    recognition.start();
  };

  const suggestedQuestions = [
    "Summarize the main requirements",
    "Identify legal risks for my company",
    "How does my experience fit here?",
    "Generate a technical summary",
  ];

  /**
   * Animate `text` into the last assistant bubble word by word. Grows the
   * streaming bubble in place when the trailing message already belongs to
   * `agentName`; otherwise appends a fresh assistant message.
   */
  const simulateTyping = (text: string, agentName: string) => {
    if (!text) return; // Don't simulate empty text
    setIsTyping(true);
    let currentText = "";
    const words = text.split(" ");
    let i = 0;
    const interval = setInterval(() => {
      if (i < words.length) {
        currentText += (i === 0 ? "" : " ") + words[i];
        setMessages(prev => {
          const last = prev[prev.length - 1];
          if (last && last.role === 'assistant' && last.agent === agentName) {
            return [...prev.slice(0, -1), { ...last, content: currentText }];
          }
          return [...prev, { role: 'assistant', content: currentText, agent: agentName }];
        });
        i++;
      } else {
        clearInterval(interval);
        typingIntervalRef.current = null;
        setIsTyping(false);
      }
    }, 20); // Faster typing
    typingIntervalRef.current = interval;
  };

  // Keep the transcript scrolled to the newest message.
  useEffect(() => {
    if (scrollRef.current) {
      scrollRef.current.scrollTop = scrollRef.current.scrollHeight;
    }
  }, [messages]);

  /**
   * Send the current input (or `overrideInput`) to the backend. An attached
   * image is base64-encoded and appended inline; otherwise any accumulated
   * document context is prepended (truncated to 3000 chars).
   */
  const handleSend = async (overrideInput?: string) => {
    const messageToSend = overrideInput || input;
    if (!messageToSend.trim() || isLoading) return;
    let imageBase64 = "";
    if (attachedFile && attachedFile.type.startsWith("image/")) {
      setIsUploading(true);
      try {
        // Encode the pending image as a data URL for the vision model.
        imageBase64 = await new Promise((resolve, reject) => {
          const reader = new FileReader();
          reader.onload = () => resolve(reader.result as string);
          reader.onerror = reject;
          reader.readAsDataURL(attachedFile);
        });
      } catch (err) {
        // Best-effort: on encode failure the message is sent without the image.
        console.error("Error converting image:", err);
      }
      setIsUploading(false);
    }
    const userMsg: Message = { role: "user", content: messageToSend, agent: "User" };
    setMessages(prev => [...prev, userMsg]);
    if (!overrideInput) setInput("");
    setAttachedFile(null); // the attachment is consumed by this send
    setIsLoading(true);
    try {
      // Image data takes precedence over document context for the payload.
      const finalMessage = imageBase64
        ? `${messageToSend}\n\nIMAGE_DATA:${imageBase64}`
        : contextText ? `[DOC CONTEXT: ${contextText.slice(0, 3000)}]\n\nUSER QUESTION: ${messageToSend}` : messageToSend;
      const response = await fetch(`${getAPIBase()}/api/chat`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          tender,
          company_profile: companyProfile,
          message: finalMessage,
          agent: selectedAgent.id,
          model: selectedModel,
          // `messages` here is the pre-send snapshot, so history excludes the
          // message being sent (the backend receives it via `message`).
          history: messages.map(({role, content, agent}) => ({role, content, agent_name: agent})),
        }),
      });
      if (!response.ok) throw new Error("Failed to chat");
      const data = await response.json();
      simulateTyping(data.response, selectedAgent.name);
    } catch (error) {
      console.error(error);
      setMessages(prev => [...prev, { role: "assistant", content: "⚠️ Error connecting to the agent. Please try again.", agent: selectedAgent.name }]);
    } finally {
      setIsLoading(false);
    }
  };

  /**
   * Handle a file picked via the hidden input. Images are held client-side
   * until the next send; other documents are uploaded and their extracted
   * text is appended to the chat context.
   */
  const handleFileUpload = async (e: React.ChangeEvent<HTMLInputElement>) => {
    const file = e.target.files?.[0];
    // FIX: reset the input value so picking the same file again re-fires
    // onChange (a cleared value is the only way the browser re-notifies).
    e.target.value = "";
    if (!file) return;
    if (file.type.startsWith("image/")) {
      setAttachedFile(file);
      setMessages(prev => [...prev, { role: "user", content: `🖼️ Attached image: ${file.name}` }]);
      return;
    }
    setIsUploading(true);
    try {
      const result = await uploadDocument(file);
      setContextText(prev => prev + "\n" + result.text);
      setMessages(prev => [...prev, { role: "user", content: `📎 Attached document: ${file.name}` }]);
      simulateTyping(`He analizado el documento "${file.name}". ¿Qué te gustaría saber sobre su contenido?`, selectedAgent.name);
    } catch (error) {
      console.error(error);
      alert("Error uploading document.");
    } finally {
      setIsUploading(false);
    }
  };

  // Pre-fills the input; the user still presses send explicitly.
  const handleSuggestedClick = (question: string) => {
    setInput(question);
  };

  return (
    <div className="flex flex-col h-[550px] sm:h-[600px] md:h-[650px] lg:h-[700px] glass-card rounded-[2rem] overflow-hidden border border-white/10 bg-slate-900/60 backdrop-blur-xl shadow-2xl transition-all duration-500">
      <input
        type="file"
        ref={fileInputRef}
        onChange={handleFileUpload}
        className="hidden"
        accept=".pdf,.docx,.doc,.txt,image/*"
      />
      {/* Chat Header */}
      <div className="p-4 md:p-6 border-b border-white/5 flex flex-col md:flex-row md:items-center justify-between gap-4 bg-white/[0.03]">
        <div className="flex items-center gap-4">
          <div className={`text-2xl md:text-3xl transition-all duration-500 relative ${isLoading || isTyping ? 'scale-110' : ''}`}>
            {selectedAgent.avatar}
            {(isLoading || isTyping || isUploading) && (
              <div className="absolute inset-0 bg-purple-500/20 blur-xl rounded-full animate-pulse" />
            )}
          </div>
          <div>
            <h4 className="text-white text-sm md:text-base font-bold flex items-center gap-2">
              {selectedAgent.name}
              {(isLoading || isTyping || isUploading) && <span className="h-1.5 w-1.5 bg-green-500 rounded-full animate-pulse shadow-[0_0_8px_rgba(34,197,94,0.8)]" />}
            </h4>
            <p className="text-[9px] text-slate-500 uppercase tracking-widest font-black">Expert Consultant</p>
          </div>
        </div>
        <div className="flex items-center gap-2 md:gap-3 overflow-x-auto no-scrollbar">
          <select
            value={selectedAgent.id}
            onChange={(e) => setSelectedAgent(agents.find(a => a.id === e.target.value) || agents[0])}
            className="bg-white/5 border border-white/10 rounded-xl px-2 md:px-3 py-1.5 text-[9px] md:text-[10px] uppercase font-black tracking-widest text-slate-400 hover:text-white transition-all cursor-pointer outline-none focus:border-purple-500/50"
          >
            {agents.map(a => <option key={a.id} value={a.id} className="bg-slate-900">{a.name}</option>)}
          </select>
          <div className="h-6 w-px bg-white/5 shrink-0" />
          <select
            value={selectedModel}
            onChange={(e) => setSelectedModel(e.target.value)}
            className="bg-white/5 border border-white/10 rounded-xl px-2 md:px-3 py-1.5 text-[9px] md:text-[10px] uppercase font-black tracking-widest text-slate-400 hover:text-white transition-all cursor-pointer outline-none focus:border-purple-500/50"
          >
            {models.map(m => <option key={m} value={m} className="bg-slate-900">{m}</option>)}
          </select>
        </div>
      </div>
      {/* Messages Area */}
      <div
        ref={scrollRef}
        className="flex-1 overflow-y-auto p-6 space-y-6 custom-scrollbar bg-black/20"
      >
        {messages.length === 0 && (
          <div className="h-full flex flex-col items-center justify-center text-center space-y-4 opacity-40">
            <div className="text-5xl">💬</div>
            <p className="text-slate-400 text-sm max-w-xs">
              Hi! I'm your {selectedAgent.name}. Ask me anything about this tender's requirements, risks, or strategy.
            </p>
          </div>
        )}
        {messages.map((msg, i) => (
          <div key={i} className={`flex ${msg.role === 'user' ? 'justify-end' : 'justify-start'}`}>
            <div className={`max-w-[80%] rounded-2xl px-6 py-4 text-[13px] shadow-lg ${
              msg.role === 'user'
                ? 'bg-purple-600 text-white rounded-tr-none'
                : 'bg-white/10 text-white border border-white/20 rounded-tl-none backdrop-blur-md'
            }`}>
              {msg.role === 'assistant' && (
                <div className="text-[10px] font-black uppercase text-purple-400 mb-1 tracking-widest">
                  {msg.agent}
                </div>
              )}
              <p className="leading-relaxed whitespace-pre-wrap min-h-[1.25em] text-white/90">{msg.content}</p>
            </div>
          </div>
        ))}
        {isLoading && !isTyping && (
          <div className="flex justify-start">
            <div className="bg-white/5 rounded-2xl rounded-tl-none px-5 py-3 border border-white/10">
              <div className="flex gap-1.5">
                <div className="w-1.5 h-1.5 bg-purple-500 rounded-full animate-bounce" style={{ animationDelay: '0ms' }} />
                <div className="w-1.5 h-1.5 bg-purple-500 rounded-full animate-bounce" style={{ animationDelay: '200ms' }} />
                <div className="w-1.5 h-1.5 bg-purple-500 rounded-full animate-bounce" style={{ animationDelay: '400ms' }} />
              </div>
            </div>
          </div>
        )}
      </div>
      {/* Suggested Questions */}
      {messages.length < 3 && !isLoading && !isTyping && (
        <div className="px-6 pb-4 bg-transparent overflow-x-auto no-scrollbar">
          <div className="flex gap-2 whitespace-nowrap">
            {suggestedQuestions.map((q, i) => (
              <button
                key={i}
                onClick={() => handleSuggestedClick(q)}
                className="bg-white/5 border border-white/10 rounded-full px-4 py-2 text-[10px] font-bold text-slate-400 hover:text-white hover:bg-white/10 hover:border-purple-500/50 transition-all active:scale-95"
              >
                {q}
              </button>
            ))}
          </div>
        </div>
      )}
      {/* Input Area */}
      <div className="p-3 md:p-6 bg-white/5 border-t border-white/5">
        <div className="flex gap-1.5 md:gap-3 items-center">
          <button
            onClick={() => fileInputRef.current?.click()}
            disabled={isUploading || isLoading}
            className="w-9 h-9 md:w-12 md:h-12 rounded-xl md:rounded-2xl bg-white/5 border border-white/10 text-slate-400 flex items-center justify-center transition-all hover:bg-white/10 active:scale-95 disabled:opacity-30 shrink-0"
            title="Attach Document"
          >
            <span className={`text-lg md:text-xl ${isUploading ? 'animate-spin' : ''}`}>{isUploading ? '⌛' : '📎'}</span>
          </button>
          <input
            type="text"
            value={input}
            onChange={(e) => setInput(e.target.value)}
            // FIX: ignore Enter while an IME composition is active so CJK and
            // accented-character input doesn't send mid-composition.
            onKeyDown={(e) => e.key === 'Enter' && !e.nativeEvent.isComposing && handleSend()}
            placeholder={isUploading ? "Uploading..." : `Message...`}
            disabled={isUploading}
            className="flex-1 min-w-0 bg-black/40 border border-white/10 rounded-xl md:rounded-2xl px-3 md:px-5 py-2.5 md:py-3 text-white text-xs md:text-sm placeholder:text-slate-600 focus:outline-none focus:ring-2 focus:ring-purple-500/40 transition-all disabled:opacity-50"
          />
          <button
            onClick={startSpeechRecognition}
            disabled={isListening || isLoading || isUploading}
            className={`w-9 h-9 md:w-12 md:h-12 rounded-xl md:rounded-2xl flex items-center justify-center transition-all active:scale-95 disabled:opacity-30 border shrink-0 ${
              isListening
                ? 'bg-red-500/20 border-red-500 shadow-[0_0_20px_rgba(239,68,68,0.4)] animate-pulse'
                : 'bg-white/5 border-white/10 text-slate-400 hover:bg-white/10'
            }`}
            title="Voice Command"
          >
            <span className="text-lg md:text-xl">{isListening ? '🛑' : '🎙️'}</span>
          </button>
          <button
            onClick={() => handleSend()}
            disabled={!input.trim() || isLoading || isUploading}
            className="w-9 h-9 md:w-12 md:h-12 rounded-xl md:rounded-2xl premium-gradient text-white flex items-center justify-center transition-all active:scale-95 disabled:opacity-30 shadow-lg shadow-purple-500/20 shrink-0"
          >
            <span className="text-lg md:text-xl">✈️</span>
          </button>
        </div>
      </div>
    </div>
  );
}