jashdoshi77 committed on
Commit
c27eaf1
Β·
0 Parent(s):

Initial commit: AI Consultant Chatbot for HF Spaces

Browse files
.dockerignore ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __pycache__
2
+ *.pyc
3
+ *.pyo
4
+ .env
5
+ .git
6
+ .gitignore
7
+ *.db
8
+ test_*.py
9
+ test_*.json
10
+ test_*.txt
11
+ *.md
12
+ .agent/
13
+ .gemini/
.gitignore ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __pycache__/
2
+ *.pyc
3
+ *.pyo
4
+ .env
5
+ *.db
6
+ data/sessions.db
7
+ test_*.py
8
+ test_*.json
9
+ test_*.txt
10
+ .agent/
11
+ .gemini/
Dockerfile ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.11-slim
2
+
3
+ # Set working directory
4
+ WORKDIR /app
5
+
6
+ # Install dependencies
7
+ COPY backend/requirements.txt ./requirements.txt
8
+ RUN pip install --no-cache-dir -r requirements.txt
9
+
10
+ # Copy project files
11
+ COPY backend/ ./backend/
12
+ COPY frontend/ ./frontend/
13
+ COPY data/ ./data/ 2>/dev/null || mkdir -p ./data
14
+
15
+ # HF Spaces runs as user with uid 1000
16
+ RUN useradd -m -u 1000 user
17
+ RUN mkdir -p /app/data && chown -R user:user /app
18
+ USER user
19
+
20
+ # HF Spaces expects port 7860
21
+ EXPOSE 7860
22
+
23
+ # Run the FastAPI app from the backend directory
24
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860", "--app-dir", "backend"]
README.md ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Client Chatbot
3
+ emoji: πŸ’¬
4
+ colorFrom: blue
5
+ colorTo: purple
6
+ sdk: docker
7
+ pinned: false
8
+ ---
9
+
10
+ # AI Consultant Chatbot
11
+
12
+ A domain-agnostic consulting chatbot powered by LangGraph + Groq (Llama 3.3 70B).
13
+
14
+ ## Features
15
+ - Multi-turn diagnostic conversations
16
+ - 5-phase consulting flow (Discovery β†’ Exploration β†’ Constraints β†’ Solution β†’ Refinement)
17
+ - Detailed, actionable final recommendations
18
+ - Session management with SQLite persistence
backend/app.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """FastAPI application β€” consultant chatbot microservice.
2
+
3
+ Endpoints:
4
+ POST /chat β€” Main chat endpoint
5
+ GET /sessions β€” List all sessions
6
+ POST /sessions/new β€” Create a new session
7
+ GET /sessions/{session_id} β€” Get session details + history
8
+ DELETE /sessions/{session_id} β€” Delete a session
9
+ GET /health β€” Health check
10
+
11
+ The frontend is served as static files from ../frontend/.
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import os
17
+ import uuid
18
+ from contextlib import asynccontextmanager
19
+
20
+ from fastapi import FastAPI, HTTPException
21
+ from fastapi.middleware.cors import CORSMiddleware
22
+ from fastapi.responses import FileResponse
23
+ from fastapi.staticfiles import StaticFiles
24
+
25
+ from models import (
26
+ ChatRequest,
27
+ ChatResponse,
28
+ Message,
29
+ NewSessionResponse,
30
+ Phase,
31
+ SessionInfo,
32
+ SessionState,
33
+ )
34
+ from storage import delete_session, init_db, list_sessions, load_session, save_session
35
+ from graph import run_consultation
36
+
37
+
38
+ # ---------------------------------------------------------------------------
39
+ # Greeting message β€” shown at the start of every new session
40
+ # ---------------------------------------------------------------------------
41
+
42
+ GREETING_MESSAGE = (
43
+ "**Welcome! I'm your AI consultant.** πŸ‘‹\n\n"
44
+ "I help people think through complex problems β€” whether it's "
45
+ "**business strategy, product design, technical architecture, operations, "
46
+ "or anything else.**\n\n"
47
+ "Here's how our conversation will work:\n"
48
+ "- You describe your challenge or goal\n"
49
+ "- I'll ask smart, targeted questions β€” and share insights along the way\n"
50
+ "- Together, we'll converge on a clear, actionable solution\n\n"
51
+ "**So β€” what's on your mind? What problem are you trying to solve?**"
52
+ )
53
+
54
+
55
+ # ---------------------------------------------------------------------------
56
+ # App lifecycle
57
+ # ---------------------------------------------------------------------------
58
+
59
@asynccontextmanager
async def lifespan(app: FastAPI):
    """App lifespan hook: initialize session storage before serving requests."""
    # Runs once at startup; there is no teardown work after the yield.
    await init_db()
    yield
63
+
64
+
65
app = FastAPI(
    title="Consultant Chatbot API",
    description="Domain-agnostic consulting chatbot powered by LangGraph + Groq",
    version="1.0.0",
    lifespan=lifespan,
)

# CORS — allow all origins for microservice usage
# NOTE(review): the CORS spec forbids a wildcard origin together with
# credentials, so browsers will reject credentialed requests under
# allow_origins=["*"] + allow_credentials=True. If cookie/auth-based calls
# are ever needed, list explicit origins (or use allow_origin_regex) —
# confirm intended usage.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
80
+
81
+
82
+ # ---------------------------------------------------------------------------
83
+ # Chat endpoint
84
+ # ---------------------------------------------------------------------------
85
+
86
@app.post("/chat", response_model=ChatResponse)
async def chat(req: ChatRequest):
    """Process a user message and return the consultant's response.

    Loads the session (creating a fresh one when the id is unknown), runs
    one step of the consultation graph, persists the updated state, and
    returns the reply plus the updated phase/confidence/understanding.
    """
    # Load or create session. An unknown id silently starts a new session
    # with no greeting — /sessions/new is the endpoint that seeds one.
    session = await load_session(req.session_id)
    if session is None:
        session = SessionState(session_id=req.session_id)

    # Run consultation graph
    result = await run_consultation(session, req.message)

    # Persist updated state
    await save_session(req.session_id, result["session_state"])

    return ChatResponse(
        reply=result["assistant_reply"],
        phase=result["new_phase"],
        confidence=result["new_confidence"],
        understanding=result["new_understanding"],
    )
106
+
107
+
108
+ # ---------------------------------------------------------------------------
109
+ # Session management endpoints
110
+ # ---------------------------------------------------------------------------
111
+
112
@app.get("/sessions")
async def get_sessions():
    """List all active sessions."""
    # Delegates entirely to the storage layer; each entry's shape is
    # whatever list_sessions() returns.
    sessions = await list_sessions()
    return {"sessions": sessions}
117
+
118
+
119
@app.post("/sessions/new")
async def create_session():
    """Create a new session with an opening greeting message."""
    # Server-generated id; the greeting is stored as the first assistant
    # message so it appears in history on reload.
    # NOTE(review): models.NewSessionResponse is imported in this module but
    # not declared as response_model here — confirm whether it should be.
    session_id = str(uuid.uuid4())
    state = SessionState(
        session_id=session_id,
        messages=[Message(role="assistant", content=GREETING_MESSAGE)],
    )
    await save_session(session_id, state)
    return {
        "session_id": session_id,
        "greeting": GREETING_MESSAGE,
    }
132
+
133
+
134
@app.get("/sessions/{session_id}")
async def get_session(session_id: str):
    """Get full session details including conversation history.

    Raises:
        HTTPException: 404 when the session id is unknown.
    """
    session = await load_session(session_id)
    if session is None:
        raise HTTPException(status_code=404, detail="Session not found")
    # Pydantic model -> plain dict so FastAPI serializes the full state.
    return session.model_dump()
141
+
142
+
143
@app.delete("/sessions/{session_id}")
async def remove_session(session_id: str):
    """Delete a session.

    Raises:
        HTTPException: 404 when the session id is unknown.
    """
    deleted = await delete_session(session_id)
    if not deleted:
        raise HTTPException(status_code=404, detail="Session not found")
    return {"status": "deleted", "session_id": session_id}
150
+
151
+
152
+ # ---------------------------------------------------------------------------
153
+ # Health check
154
+ # ---------------------------------------------------------------------------
155
+
156
@app.get("/health")
async def health():
    """Liveness probe — returns a static payload, touches no dependencies."""
    return {"status": "healthy", "service": "consultant-chatbot"}
159
+
160
+
161
+ # ---------------------------------------------------------------------------
162
+ # Serve frontend static files
163
+ # ---------------------------------------------------------------------------
164
+
165
# Absolute path so serving works regardless of the process's cwd.
FRONTEND_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "frontend")

# Only wire up frontend routes when the directory exists, so the API can
# still run standalone (API-only deployments, tests).
if os.path.isdir(FRONTEND_DIR):
    @app.get("/")
    async def serve_index():
        # Serve the single-page frontend entry point at the root path.
        return FileResponse(os.path.join(FRONTEND_DIR, "index.html"))

    app.mount("/static", StaticFiles(directory=FRONTEND_DIR), name="static")
backend/graph.py ADDED
@@ -0,0 +1,931 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """LangGraph-based consultation state machine.
2
+
3
+ Implements a 5-phase consulting flow controlled by a confidence score.
4
+
5
+ CODE-LEVEL QUALITY ENFORCEMENT:
6
+ - Simplified question category tracker (3 categories instead of 6)
7
+ - Anti-repetition engine: detects sentence overlap and forces re-generation
8
+ - Anti-hallucination check: verifies claims against conversation history
9
+ - Deferral counter: tracks "I don't know" responses and switches to advice mode
10
+ - Turn-aware instructions: adjusts behavior based on conversation stage
11
+ - Understanding validation: ensures the LLM doesn't fabricate facts
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import json
17
+ import re
18
+ from difflib import SequenceMatcher
19
+ from typing import Any, TypedDict
20
+
21
+ from langchain_core.messages import HumanMessage, SystemMessage
22
+
23
+ from llm import llm
24
+ from models import Message, Phase, SessionState, PHASE_ORDER, PHASE_THRESHOLDS
25
+ from prompts import PHASE_PROMPTS
26
+
27
+
28
+ # ---------------------------------------------------------------------------
29
+ # Graph state type
30
+ # ---------------------------------------------------------------------------
31
+
32
class GraphState(TypedDict):
    """State threaded through one turn of the consultation graph."""

    # Full session being updated this turn (history, phase, metrics).
    session_state: SessionState
    # The incoming user message for this turn.
    user_message: str
    # Output: the consultant reply generated this turn.
    assistant_reply: str
    # Output: updated confidence score (the module-level flow is driven by
    # this score — see module docstring).
    new_confidence: float
    # Output: updated free-text summary of what the bot understands so far.
    new_understanding: str
    # Output: phase after this turn.
    new_phase: Phase
40
+
41
+ # ---------------------------------------------------------------------------
42
+ # Question Category Tracking System (SIMPLIFIED)
43
+ # ---------------------------------------------------------------------------
44
+
45
# Only 3 broad categories — enough to guide without forcing deep dives.
# Each entry: human-readable label/description, substring keywords used by
# the coverage detector, and the suggestion text injected into guidance.
QUESTION_CATEGORIES = {
    "business_basics": {
        "label": "Business Basics",
        "description": "What they do, what they sell/offer, rough scale",
        "keywords": ["sell", "product", "service", "business", "store", "shop",
                     "company", "revenue", "team", "employee", "people",
                     "industry", "offer", "work", "startup", "project"],
        "prompt": "You could ask about their business basics — what they do, how big the operation is.",
    },
    "problem_details": {
        "label": "Problem Details",
        "description": "What exactly is going wrong, since when, how bad",
        "keywords": ["problem", "issue", "challenge", "slow", "decline",
                     "month", "year", "since", "affect", "impact",
                     "drop", "worse", "bad", "struggle", "difficult"],
        "prompt": "You could ask more about the problem — what exactly is happening, how long, how bad.",
    },
    "data_and_attempts": {
        "label": "Data & Past Attempts",
        "description": "Where their data lives and what they've tried already",
        "keywords": ["data", "excel", "spreadsheet", "track", "record",
                     "tried", "attempt", "before", "already", "didn't work",
                     "keep", "stored", "written", "noted", "logged"],
        "prompt": "IMPORTANT: You haven't asked where they keep their data yet. Ask how they track or record their business info (Excel, notebook, software, etc.).",
    },
}
72
+
73
+
74
def _detect_covered_categories(messages: list[Message]) -> dict[str, bool]:
    """Analyze conversation to determine which question categories have been covered."""
    # One lowercase transcript of the whole conversation (both roles).
    transcript = " ".join(m.content.lower() for m in messages)

    coverage: dict[str, bool] = {}
    for cat_id, info in QUESTION_CATEGORIES.items():
        hits = sum(kw in transcript for kw in info["keywords"])
        # A category counts as covered once at least two of its keywords
        # appear anywhere in the transcript.
        coverage[cat_id] = hits >= 2
    return coverage
87
+
88
+
89
def _build_category_guidance(covered: dict[str, bool], turn_number: int) -> str:
    """Build a SOFT guidance block — suggestions, not demands.

    Args:
        covered: category id -> whether the conversation already covered it.
        turn_number: current turn number (not used in the body; presumably
            kept for caller compatibility — TODO confirm).

    Returns:
        A guidance string to inject into the system prompt.
    """
    uncovered = [
        QUESTION_CATEGORIES[cat]["prompt"]
        for cat, is_covered in covered.items()
        if not is_covered
    ]

    # Everything covered: stop asking, start recommending.
    if not uncovered:
        return "\nYou've gathered enough info. Focus on giving RECOMMENDATIONS now.\n"

    # Soft suggestion — max 1 at a time to avoid overwhelming
    guidance = "\n💡 Suggestion — if relevant, you might want to ask about:\n"
    guidance += f" → {uncovered[0]}\n"

    covered_list = [
        QUESTION_CATEGORIES[cat]["label"]
        for cat, is_covered in covered.items()
        if is_covered
    ]
    if covered_list:
        guidance += f"\n🚫 ALREADY COVERED (do NOT ask about these again): {', '.join(covered_list)}\n"

    guidance += "\nBut ONLY ask if it naturally follows from what the client just said. Do NOT force it.\n"

    return guidance
115
+
116
+
117
+ # ---------------------------------------------------------------------------
118
+ # Conversation intelligence helpers
119
+ # ---------------------------------------------------------------------------
120
+
121
# Regexes that signal the user is deferring ("I don't know", "you decide",
# clarification requests). Matched case-insensitively via lowercased text.
DEFERRAL_PATTERNS = [
    r"\bi\s*(?:don'?t|dont|dotn|do\s*not)\s*know\b",
    r"\bno\s*idea\b",
    r"\bnot\s*sure\b",
    r"\byou\s*decide\b",
    r"\byou\s*tell\s*me\b",
    r"\bidk\b",
    r"\bno\s*clue\b",
    r"\bi\s*have\s*no\s*(idea|clue)\b",
    r"\bwhat\s*do\s*you\s*(mean|suggest|think|recommend)\b",
    r"\bcan\s*you\s*explain\b",
    r"\bnope\b",
    r"\bi\s*(?:don'?t|dont)\s*(?:really\s*)?know\b",
]


def _count_deferrals(messages: list[Message]) -> int:
    """Count how many times the user deferred or said 'I don't know'."""
    total = 0
    for msg in messages:
        if msg.role != "user":
            continue
        lowered = msg.content.lower()
        # Each message counts at most once, however many patterns it hits.
        if any(re.search(p, lowered) for p in DEFERRAL_PATTERNS):
            total += 1
    return total
148
+
149
+
150
def _count_bot_questions(messages: list[Message]) -> int:
    """Count total questions the bot has asked across all turns."""
    # Crude heuristic: every "?" in an assistant message is one question.
    total = 0
    for msg in messages:
        if msg.role == "assistant":
            total += msg.content.count("?")
    return total
153
+
154
+
155
def _get_turn_number(messages: list[Message]) -> int:
    """Get the current turn number (count of user messages)."""
    # 1-based: before any user message we are on turn 1.
    user_count = len([m for m in messages if m.role == "user"])
    return user_count + 1
158
+
159
+
160
def _get_previous_bot_responses(messages: list[Message]) -> list[str]:
    """Get all previous bot responses, oldest first."""
    replies: list[str] = []
    for msg in messages:
        if msg.role == "assistant":
            replies.append(msg.content)
    return replies
163
+
164
+
165
# Specific patterns that indicate the bot ACTUALLY asked about data STORAGE.
# Must be specific enough to not false-positive on generic mentions.
DATA_STORAGE_QUESTION_PATTERNS = [
    r"(?:how|where) do you (?:keep track|track|record|store|log|manage)",
    r"(?:do you|are you) (?:use|using) (?:any |a |)(?:excel|spreadsheet|notebook|software|tool|system|app)",
    r"(?:excel|spreadsheet|notebook|software|database|app|tool|system).+(?:to track|to record|to manage|to store|to keep)",
    r"(?:keep|store|record|track|log).+(?:in a |in |on |using |with )(?:excel|spreadsheet|notebook|software|database|app|system|paper)",
    r"how (?:do you|are you) (?:currently |)(?:tracking|recording|storing|managing|keeping) (?:your |the |all |this )",
    r"(?:track|record|manage) your (?:business |)(?:information|data|sales|inventory)",
    r"(?:track or record|record or track|store or track)",
]


def _has_asked_about_data(messages: list[Message]) -> bool:
    """Check if the bot has SPECIFICALLY asked about data storage/tracking tools.

    Only returns True if the bot asked WHERE/HOW data is stored (Excel,
    notebook, etc.), not on any generic mention of 'data' or 'track'.
    """
    for msg in messages:
        # Only assistant messages that actually contain a question.
        if msg.role != "assistant" or "?" not in msg.content:
            continue
        # Examine each question fragment separately so a generic mention
        # elsewhere in the reply cannot produce a false positive.
        for fragment in msg.content.lower().split("?"):
            if any(re.search(p, fragment) for p in DATA_STORAGE_QUESTION_PATTERNS):
                return True
    return False
195
+
196
+
197
def _check_repetition(new_reply: str, previous_replies: list[str]) -> float:
    """Check sentence-level overlap with previous replies. Returns 0.0-1.0."""

    def substantial_sentences(text: str) -> set[str]:
        # Keep only sentences longer than 20 chars so short boilerplate
        # fragments can't trigger overlap.
        return {
            part.strip().lower()
            for part in re.split(r'[.!?\n]', text)
            if len(part.strip()) > 20
        }

    fresh = substantial_sentences(new_reply)
    if not previous_replies or not fresh:
        return 0.0

    worst = 0.0
    for old_reply in previous_replies:
        old = substantial_sentences(old_reply)
        if not old:
            continue
        # A fresh sentence "overlaps" if it is >75% similar to any old one.
        matched = sum(
            1
            for new_s in fresh
            if any(SequenceMatcher(None, new_s, old_s).ratio() > 0.75 for old_s in old)
        )
        worst = max(worst, matched / len(fresh))

    return worst
232
+
233
+
234
+ # ---------------------------------------------------------------------------
235
+ # Reply quality enforcement (code-level, can't be bypassed by LLM)
236
+ # ---------------------------------------------------------------------------
237
+
238
+ # Patterns that indicate the bot is asking the client to solve their own problem
239
+ SELF_SOLVING_PATTERNS = [
240
+ r"what do you think is (?:the |)(?:main |primary |root |)(?:reason|cause|issue)",
241
+ r"what do you (?:think|believe|feel) (?:is |might be |could be |)",
242
+ r"how do you think .+ could help",
243
+ r"what would you (?:like|want|suggest|recommend)",
244
+ r"do you have any (?:idea|thoughts|theories) (?:on |about |why )",
245
+ r"why do you think",
246
+ r"what do you attribute .+ to",
247
+ r"have you (?:considered|thought about) (?:the role of|how|why|whether)",
248
+ r"have you (?:considered|thought about) (?:offering|changing|trying|implementing)",
249
+ # Catch ANY question with "you think" in it
250
+ r"\?.*you think",
251
+ r"you think.+\?",
252
+ r"(?:reason|cause)\s+you\s+think",
253
+ r"what role (?:do you think |does |)",
254
+ ]
255
+
256
+ # Patterns that suggest causes disguised as questions
257
+ CAUSE_SUGGESTING_PATTERNS = [
258
+ r"have you noticed any changes in .+(?:demand|market|trend|preference|competition)",
259
+ r"could (?:it|this) be (?:related to|because of|due to)",
260
+ r"do you think .+(?:might be|could be) (?:causing|contributing|leading)",
261
+ r"is it possible that .+(?:demand|pricing|competition|market)",
262
+ r"(?:the role of|due to) (?:pricing|marketing|product presentation|competition)",
263
+ r"(?:external factors|market trends|consumer behavior).+(?:impact|affect|contribut)",
264
+ r"(?:changes in|shifts in) (?:consumer|customer|market|buying) (?:behavior|preference|demand|pattern)",
265
+ # Listing generic causes
266
+ r"(?:such as|like|including) (?:low |high |ineffective |poor ).*(?:demand|pric|market|competit)",
267
+ r"(?:due to|because of) (?:various|several|multiple) factors",
268
+ ]
269
+
270
+ # Filler SENTENCES that should be stripped from ANYWHERE in the response
271
+ # These are full-sentence patterns that add no value and make responses robotic
272
+ FILLER_SENTENCES = [
273
+ # "I'm glad / I'm happy" openers
274
+ r"i'm glad you (?:shared|told|mentioned|brought)",
275
+ r"i'm happy (?:to hear|you shared|you told)",
276
+ # "I'd love to" filler
277
+ r"i'd love to (?:learn|help|explore|understand|hear|know|dig)",
278
+ r"i'd like to (?:learn|help|explore|understand|hear|know|dig)",
279
+ # Restating the problem robotically
280
+ r"a slow[- ]moving inventory (?:of|can be)",
281
+ r"(?:can be|is) a (?:challenging|significant|difficult|tough|particularly challenging) (?:issue|problem|situation|challenge) to (?:tackle|address|deal with)",
282
+ r"it's likely that this is affecting",
283
+ r"this (?:can|may|might) (?:be|also be) (?:affecting|impacting|taking)",
284
+ # Robotic transitions
285
+ r"it sounds like you(?:'re| are) (?:dealing with|experiencing|having|facing|going through)",
286
+ r"^it(?:'s| is) (?:clear|evident|apparent|obvious|understandable|interesting) that",
287
+ r"it can be (?:particularly |especially |quite |very |)(?:challenging|concerning|frustrating|difficult|tough)",
288
+ r"i (?:can |)(?:understand|see|imagine|appreciate) (?:that |how |why )(?:this|it|you)",
289
+ r"that(?:'s| is) (?:a |definitely a |certainly a |)(?:great|good|excellent|interesting)",
290
+ r"thank you for sharing",
291
+ r"i'm (?:starting to |)(?:get|getting) a sense",
292
+ r"^(?:a |this is a )(?:challenging|significant|difficult|tough|concerning) (?:situation|challenge|problem|issue)",
293
+ r"you(?:'d| would) be (?:concerned|worried|frustrated|surprised)",
294
+ r"it seems like (?:we(?:'ve| have)|you(?:'ve| have))",
295
+ # Recap openers β€” bot restates known facts every turn
296
+ r"you(?:'re| are) (?:in|involved in) the .{3,30} business",
297
+ r"you(?:'ve| have) (?:been using|mentioned|shared|told|noted|indicated|acknowledged)",
298
+ r"given the (?:specifics|broad range|unique aspects|diverse nature|complexity)",
299
+ r"to better understand (?:the scope|your situation|the issue|your)",
300
+ r"this (?:is a good starting point|indicates you have)",
301
+ r"i'm excited to learn more",
302
+ r"i'm looking forward to",
303
+ r"understanding (?:your|the) specific",
304
+ r"(?:this|that) (?:can be|is) (?:particularly |especially |)(?:challenging|concerning|frustrating)",
305
+ # Repeated robotic phrases
306
+ r"(?:can be |is )particularly (?:challenging|concerning)",
307
+ r"a (?:challenging|significant) (?:situation|challenge|financial burden)",
308
+ r"(?:\w+ )?industry(?:,|) where trends and (?:consumer |customer |)preferences can shift",
309
+ r"wasted resources,? tied-?up capital",
310
+ r"high value and luxury nature",
311
+ r"it(?:'s| is) essential to (?:identify|address|tackle|consider)",
312
+ r"(?:affecting|impacting) your business(?:'s|'s) (?:overall |)(?:performance|cash flow|profitability)",
313
+ r"(?:overall |)(?:performance|appearance|freshness) and (?:customer engagement|profitability)",
314
+ r"(?:get|getting) your business back on track",
315
+ # "I'm imagining" / "I'm thinking"
316
+ r"i'm (?:imagining|thinking) that (?:this|it) (?:might|may|could|is)",
317
+ # "It seems like" (broad β€” catches any usage)
318
+ r"it seems like (?:the|your|this|that)",
319
+ # "This could indicate/be due to"
320
+ r"this (?:could|may|might) (?:indicate|suggest|mean|be due to|be related to|be a sign)",
321
+ # "It's likely that you're"
322
+ r"it(?:'s| is) likely that (?:you(?:'re| are)|this|there)",
323
+ # "Given the high value / slow movement"
324
+ r"given the (?:high value|slow movement|nature|importance|significance)",
325
+ # "It's great that you..."
326
+ r"it(?:'s| is) great that you(?:'ve| have)",
327
+ # "You've just started our conversation"
328
+ r"you(?:'ve| have) just started",
329
+ ]
330
+
331
+ # Section headers that create robotic formatting
332
+ RECAP_HEADERS = [
333
+ r"\*\*(?:key points?|current understanding|summary|what (?:we|i) know(?: so far)?|what you(?:'ve| have) (?:told|shared|mentioned))(?:\s*so far)?\s*:?\*\*",
334
+ r"\*\*(?:key (?:observations?|takeaways?|insights?|details?|information))\s*:?\*\*",
335
+ r"\*\*(?:current challenges?|situation overview|current situation)\s*:?\*\*",
336
+ ]
337
+
338
+
339
def _detect_bad_question_patterns(reply: str) -> str | None:
    """Detect self-solving and cause-suggesting questions.

    Returns a correction instruction if bad patterns found, None otherwise.
    """
    lowered = reply.lower()

    # Case 1: the bot bounced the diagnosis back onto the client.
    if any(re.search(p, lowered) for p in SELF_SOLVING_PATTERNS):
        return (
            "⚠️ You asked the client to diagnose their own problem. "
            "YOU are the consultant — YOU figure out the cause. "
            "Instead, ask a SPECIFIC diagnostic question that helps you "
            "narrow down the cause yourself. For example, ask about "
            "specifics of the problem, not what they think is causing it."
        )

    # Case 2: the bot embedded assumed causes inside its question.
    if any(re.search(p, lowered) for p in CAUSE_SUGGESTING_PATTERNS):
        return (
            "⚠️ You suggested possible causes IN your question. "
            "Don't lead the client toward causes you've assumed. "
            "Ask a NEUTRAL question that lets them describe their "
            "situation without your bias."
        )

    return None
366
+
367
+
368
def _strip_filler_sentences(reply: str) -> str:
    """Strip filler sentences from the response.

    Strategy: Only strip filler from the OPENING of each paragraph.
    This removes robotic openers while preserving analytical content.
    If stripping would make the response too short, keep the original.
    """
    paragraphs = reply.split("\n")
    result_paragraphs = []

    for para in paragraphs:
        # Blank lines pass through untouched to preserve paragraph breaks.
        if not para.strip():
            result_paragraphs.append(para)
            continue

        # Split paragraph into sentences.
        # NOTE: the split boundary is only '.' and '!' — a '?' is not a
        # boundary, so questions stay fused to the preceding sentence.
        sentences = re.split(r'(?<=[.!])\s+', para.strip())
        if not sentences:
            result_paragraphs.append(para)
            continue

        # Only check the FIRST 2 sentences of each paragraph for filler
        # This preserves analytical content in the middle/end
        kept_sentences = list(sentences)  # start with all

        # Strip leading filler sentences (up to 2); stop at the first
        # non-filler sentence.
        stripped = 0
        while stripped < min(2, len(kept_sentences)):
            sent_lower = kept_sentences[0].strip().lower()
            is_filler = False
            for pattern in FILLER_SENTENCES:
                if re.search(pattern, sent_lower):
                    is_filler = True
                    break
            if is_filler:
                kept_sentences.pop(0)
                stripped += 1
            else:
                break

        # A paragraph whose every checked sentence was filler is dropped
        # entirely (nothing appended).
        if kept_sentences:
            rebuilt = " ".join(s.strip() for s in kept_sentences if s.strip())
            # Re-capitalize if stripping left a lowercase opener.
            if rebuilt and rebuilt[0].islower():
                rebuilt = rebuilt[0].upper() + rebuilt[1:]
            result_paragraphs.append(rebuilt)

    result = "\n".join(result_paragraphs).strip()
    result = re.sub(r"\n{3,}", "\n\n", result)
    result = result.strip()

    # SAFEGUARD: If stripping made the response too short, keep the original
    # Count non-question content length (question lines removed first).
    non_question = re.sub(r'[^\n]*\?[^\n]*', '', result)
    if len(non_question.strip()) < 80 and len(reply.strip()) > len(result.strip()):
        return reply

    return result if result else reply
425
+
426
+
427
def _break_wall_of_text(reply: str) -> str:
    """Break long wall-of-text paragraphs into readable chunks.

    Any paragraph longer than 400 characters with no internal newline is
    re-split into sentences and regrouped two sentences per paragraph.
    """
    rebuilt = []
    for para in reply.split("\n\n"):
        # Short paragraphs, or ones that already contain line breaks,
        # pass through unchanged.
        if len(para) <= 400 or "\n" in para.strip():
            rebuilt.append(para)
            continue

        pieces = re.split(r'(?<=[.!?])\s+', para.strip())
        if len(pieces) <= 2:
            rebuilt.append(para)
            continue

        # Pair sentences two at a time; a trailing odd sentence becomes
        # its own short paragraph.
        groups = [" ".join(pieces[i:i + 2]) for i in range(0, len(pieces), 2)]
        rebuilt.append("\n\n".join(groups))

    return "\n\n".join(rebuilt)
460
+
461
+
462
def _format_questions(reply: str) -> str:
    """Ensure each question sits in its own paragraph.

    The frontend renders a question in its styled box only when it lands in
    its own <p> tag, which requires blank lines around it in the markdown.
    """
    lines = reply.split("\n")
    out: list[str] = []

    for idx, line in enumerate(lines):
        content = line.strip()
        # Heuristic: a line with a "?" and more than 10 chars is a question.
        is_question = "?" in content and len(content) > 10

        if not is_question:
            out.append(line)
            continue

        # Blank line before the question unless one is already there.
        if out and out[-1].strip():
            out.append("")
        out.append(line)
        # Blank line after, unless the next source line is already blank.
        if idx + 1 < len(lines) and lines[idx + 1].strip():
            out.append("")

    # Collapse runs of 3+ newlines down to a single blank line.
    formatted = re.sub(r"\n{3,}", "\n\n", "\n".join(out))
    return formatted.strip()
491
+
492
+
493
+ # ---------------------------------------------------------------------------
494
+ # Anti-hallucination checks
495
+ # ---------------------------------------------------------------------------
496
+
497
+ def _extract_user_text(messages: list[Message]) -> str:
498
+ """Get all user messages combined as a single text block."""
499
+ return " ".join(msg.content.lower() for msg in messages if msg.role == "user")
500
+
501
+
502
def _validate_understanding(understanding: str, messages: list[Message]) -> str:
    """Guard the LLM's cumulative 'understanding' summary against fabricated facts.

    If the summary cites specific multi-digit numbers that never appear in any
    user message, and more than half of its numbers are unverifiable, the
    summary is discarded and rebuilt as a literal quote of the client's most
    recent messages.
    """
    if not understanding:
        return understanding

    user_text = _extract_user_text(messages)
    if not user_text:
        return understanding

    # Numbers of at least two characters (digits with optional , or .
    # separators) mentioned in the summary.
    mentioned = re.findall(r'\b\d[\d,.]+\b', understanding)
    if not mentioned:
        return understanding

    # Count figures that cannot be located verbatim in the user's own words.
    unsupported = sum(1 for num in mentioned if num not in user_text)

    # Majority fabricated: replace the summary with the client's raw words.
    if unsupported > len(mentioned) / 2:
        recent = [m.content for m in messages if m.role == "user"][-5:]
        return "Client has said: " + " | ".join(recent)

    return understanding
529
+
530
+
531
def _check_hallucination(reply: str, messages: list[Message]) -> bool:
    """Detect likely hallucinated specifics (money amounts, percentages) in a reply.

    A claim counts as hallucinated when its numeric part cannot be found
    anywhere in the user's own messages. Returns True if any such claim is
    present; False when there is no user text to compare against.
    """
    user_text = _extract_user_text(messages)
    if not user_text:
        return False

    lowered = reply.lower()

    # Currency amounts (β‚Ή / Rs / INR / $ / USD, optionally scaled by
    # lakh/crore/k/m/...) that the client never mentioned.
    currency_hits = re.findall(
        r'(?:β‚Ή|rs\.?|inr|\$|usd)\s*[\d,.]+(?:\s*(?:lakh|crore|k|m|million|billion))?',
        lowered
    )
    for hit in currency_hits:
        digits = re.findall(r'[\d,.]+', hit)
        if digits and not any(d in user_text for d in digits):
            return True

    # Percentages the client never stated (checked with and without the sign).
    for pct in re.findall(r'\d+(?:\.\d+)?%', lowered):
        bare = pct.replace('%', '')
        if bare not in user_text and pct not in user_text:
            return True

    return False
561
+
562
+
563
+ # ---------------------------------------------------------------------------
564
+ # Core processing
565
+ # ---------------------------------------------------------------------------
566
+
567
def _build_message_list(state: SessionState, user_message: str) -> list:
    """Build the message list with intelligence injection.

    Assembles [system prompt, last 20 history messages, new user message]
    for the LLM. As a side effect it MUTATES ``state`` in place: shortcut
    detection, new-topic resets, and hard phase forcing all adjust
    ``state.phase`` / ``state.confidence`` / ``state.topic_start_turn``
    before the prompt is chosen.
    """
    turn_number = _get_turn_number(state.messages)
    deferral_count = _count_deferrals(state.messages)
    questions_asked = _count_bot_questions(state.messages)
    user_msg_lower = user_message.lower().strip()

    # --- SOLUTION SHORTCUT ---
    # If user says "give me solution" / "tell me what to do" etc., jump to
    # the final-answer phase immediately instead of continuing diagnostics.
    SOLUTION_SHORTCUT_PATTERNS = [
        r"give me (?:the |a |)(?:solution|answer|recommendation|plan|advice)",
        r"tell me what to do",
        r"just (?:give me|tell me|skip to)",
        r"what (?:should|do) (?:i|we) do",
        r"(?:skip|jump) (?:to |)(?:the |)(?:solution|answer|recommendation|advice)",
        r"i (?:just |)(?:want|need) (?:the |a |your |)(?:solution|answer|recommendation|plan|advice)",
    ]
    if any(re.search(p, user_msg_lower) for p in SOLUTION_SHORTCUT_PATTERNS):
        state.phase = Phase.REFINEMENT
        state.confidence = max(state.confidence, 0.85)

    # --- NEW TOPIC RESET ---
    # If we're in SOLUTION/REFINEMENT and the user's message is NOT a
    # goodbye/acknowledgement, they may be raising a fresh problem.
    END_CONVO_PATTERNS = [
        r"^no$", r"^nope$", r"^nothing$", r"^that'?s? (?:all|it)$",
        r"^thanks?$", r"^thank you", r"^bye$", r"^ok$", r"^okay$",
    ]
    is_end_of_convo = any(re.search(p, user_msg_lower) for p in END_CONVO_PATTERNS)

    if state.phase in (Phase.SOLUTION, Phase.REFINEMENT) and not is_end_of_convo:
        # Check if the last bot message had "anything else" β€” signals end of a topic
        last_bot_msg = ""
        for msg in reversed(state.messages):
            if msg.role == "assistant":
                last_bot_msg = msg.content.lower()
                break
        if "anything else" in last_bot_msg or "other challenges" in last_bot_msg:
            # User is bringing up a new topic β€” reset to discovery
            state.phase = Phase.DISCOVERY
            state.confidence = 0.1
            state.topic_start_turn = turn_number  # Reset the topic turn counter
            # Keep the conversation history but mark the new topic.
            # NOTE(review): the trailing conditional binds to the WHOLE
            # expression, so when understanding is empty the NEW TOPIC marker
            # is dropped entirely (assigned ""). Looks intentional (nothing to
            # append to), but confirm.
            state.understanding = state.understanding + f"\n\n--- NEW TOPIC: {user_message} ---\n" if state.understanding else ""

    # --- HARD PHASE FORCING β€” guarantee we reach recommendations ---
    # Use RELATIVE turn count (turns since current topic started), so a
    # topic reset restarts the countdown.
    topic_turns = turn_number - state.topic_start_turn
    current_phase = state.phase
    current_confidence = state.confidence
    if topic_turns >= 12 and current_phase != Phase.REFINEMENT:
        current_phase = Phase.REFINEMENT
        current_confidence = max(current_confidence, 0.85)
        state.phase = current_phase
        state.confidence = current_confidence
    elif topic_turns >= 10 and current_phase in (Phase.DISCOVERY, Phase.EXPLORATION):
        current_phase = Phase.SOLUTION
        current_confidence = max(current_confidence, 0.65)
        state.phase = current_phase
        state.confidence = current_confidence

    # --- USE TURN-1 SPECIFIC PROMPT FOR FIRST MESSAGE ---
    if turn_number <= 1:
        # Local import β€” presumably avoids an import cycle with prompts.py;
        # TODO confirm.
        from prompts import TURN_1_PROMPT
        phase_prompt = TURN_1_PROMPT
    else:
        phase_prompt = PHASE_PROMPTS[current_phase].format(confidence=current_confidence)

    # Add understanding context (validated upstream by _validate_understanding).
    if state.understanding:
        phase_prompt += (
            f"\n\nCLIENT INFO (for YOUR reference only β€” do NOT summarize or repeat this back to the client):"
            f"\n{state.understanding}\n"
            f"IMPORTANT: Do NOT start your response by restating what you know about the client. "
            f"Jump STRAIGHT into new insights, analysis, or questions.\n"
        )

    # --- CONVERSATION STATS ---
    stats = f"\nTurn {turn_number} | {questions_asked} questions asked so far | {deferral_count} deferrals\n"
    phase_prompt += stats

    # --- HARD DATA QUESTION ENFORCEMENT ---
    # If the bot hasn't asked about data storage by turn 2, force it.
    if turn_number >= 2 and not _has_asked_about_data(state.messages):
        phase_prompt += """
⚠️ MANDATORY: You have NOT yet asked where the client keeps their business data.
ONE of your questions MUST be about how they track or record their business
information. For example: "How do you keep track of all this β€” in a spreadsheet,
a notebook, or some software?" This is critical for your recommendations.
"""
    elif turn_number > 1:
        # Otherwise steer question variety via category coverage tracking.
        covered = _detect_covered_categories(state.messages)
        category_guidance = _build_category_guidance(covered, turn_number)
        phase_prompt += category_guidance

    # --- DYNAMIC BEHAVIORAL OVERRIDES ---
    # Repeated "I don't know" answers: stop interrogating, start advising.
    if deferral_count >= 2:
        phase_prompt += """
⚠️ THE USER SAID "I DON'T KNOW" MULTIPLE TIMES. They cannot answer more questions.
SWITCH TO ADVICE MODE IMMEDIATELY:
- STOP asking diagnostic questions. Give at most 1 simple yes/no question.
- LEAD with YOUR specific recommendations: "Based on what you've told me, here's what I'd suggest..."
- Make reasonable assumptions and state them.
- Be SPECIFIC: name tools, methods, steps. No generic advice like "review your pricing".
- You are the EXPERT β€” the client is paying YOU to figure it out.
"""

    # Question count limits only apply during diagnostic phases.
    # In solution/final phases, the prompt itself handles question limits.
    if state.phase in (Phase.DISCOVERY, Phase.EXPLORATION):
        if questions_asked >= 20:
            phase_prompt += "\n⚠️ STOP ASKING. Give your recommendation NOW. Zero questions.\n"
        elif questions_asked >= 14:
            phase_prompt += "\nYou've asked enough questions. Shift to mostly advice, at most 1 question.\n"

    if turn_number >= 8:
        phase_prompt += "\n⚠️ Turn 8+. Give CONCRETE recommendations now. Reference details they shared.\n"

    messages = [SystemMessage(content=phase_prompt)]

    # Conversation history (last 20 messages). Prior assistant turns are
    # injected as SystemMessages, presumably so the model treats them as
    # context rather than role-played output β€” TODO confirm this is deliberate.
    for msg in state.messages[-20:]:
        if msg.role == "user":
            messages.append(HumanMessage(content=msg.content))
        else:
            messages.append(SystemMessage(content=f"[Your previous response]: {msg.content}"))

    messages.append(HumanMessage(content=user_message))

    return messages
696
+
697
+
698
+ def _parse_llm_output(raw: str) -> dict[str, Any]:
699
+ """Parse the JSON output from the LLM with robust fallback."""
700
+ text = raw.strip()
701
+
702
+ # Strip markdown code fences
703
+ json_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, re.DOTALL)
704
+ if json_match:
705
+ text = json_match.group(1)
706
+ else:
707
+ brace_match = re.search(r"\{.*\}", text, re.DOTALL)
708
+ if brace_match:
709
+ text = brace_match.group(0)
710
+
711
+ # Attempt 1: direct parse
712
+ try:
713
+ data = json.loads(text)
714
+ return {
715
+ "reply": str(data.get("reply", text)),
716
+ "confidence": float(data.get("confidence", 0.1)),
717
+ "understanding": str(data.get("understanding", "")),
718
+ }
719
+ except (json.JSONDecodeError, ValueError):
720
+ pass
721
+
722
+ # Attempt 2: sanitise newlines
723
+ try:
724
+ sanitised = re.sub(r'(?<=": ")(.*?)(?="[,\s]*\n?\s*"|\"\s*\})',
725
+ lambda m: m.group(0).replace('\n', '\\n'),
726
+ text, flags=re.DOTALL)
727
+ data = json.loads(sanitised)
728
+ return {
729
+ "reply": str(data.get("reply", text)),
730
+ "confidence": float(data.get("confidence", 0.1)),
731
+ "understanding": str(data.get("understanding", "")),
732
+ }
733
+ except (json.JSONDecodeError, ValueError, TypeError):
734
+ pass
735
+
736
+ # Attempt 3: regex extraction
737
+ reply = ""
738
+ confidence = 0.1
739
+ understanding = ""
740
+
741
+ reply_match = re.search(
742
+ r'"reply"\s*:\s*"(.*?)"\s*,\s*"(?:confidence|understanding)',
743
+ text, re.DOTALL,
744
+ )
745
+ if reply_match:
746
+ reply = reply_match.group(1).replace('\\n', '\n').replace('\\"', '"')
747
+
748
+ conf_match = re.search(r'"confidence"\s*:\s*([\d.]+)', text)
749
+ if conf_match:
750
+ try:
751
+ confidence = float(conf_match.group(1))
752
+ except ValueError:
753
+ pass
754
+
755
+ und_match = re.search(
756
+ r'"understanding"\s*:\s*"(.*?)"\s*\}',
757
+ text, re.DOTALL,
758
+ )
759
+ if und_match:
760
+ understanding = und_match.group(1).replace('\\n', '\n').replace('\\"', '"')
761
+
762
+ if reply:
763
+ return {"reply": reply, "confidence": confidence, "understanding": understanding}
764
+
765
+ # Ultimate fallback
766
+ fallback = text
767
+ for pattern in [r'"reply"\s*:\s*"?', r'"confidence"\s*:.*', r'"understanding"\s*:.*', r'[{}]']:
768
+ fallback = re.sub(pattern, '', fallback, flags=re.DOTALL)
769
+ fallback = fallback.strip().strip('"').strip(',').strip()
770
+
771
+ return {
772
+ "reply": fallback if fallback else raw.strip(),
773
+ "confidence": confidence,
774
+ "understanding": understanding,
775
+ }
776
+
777
+
778
def _determine_phase(confidence: float) -> Phase:
    """Map a confidence score to the appropriate consultation phase.

    Walks the phases from most to least advanced and returns the first one
    whose lower confidence bound the score clears; defaults to DISCOVERY.
    """
    for candidate in reversed(PHASE_ORDER):
        lower_bound = PHASE_THRESHOLDS[candidate][0]
        if confidence >= lower_bound:
            return candidate
    return Phase.DISCOVERY
785
+
786
+
787
+ # ---------------------------------------------------------------------------
788
+ # Main graph execution
789
+ # ---------------------------------------------------------------------------
790
+
791
async def run_consultation(session_state: SessionState, user_message: str) -> GraphState:
    """Run one turn of the consultation with quality enforcement.

    Pipeline: build the prompt β†’ call the LLM β†’ run a series of quality
    gates (repetition, question limits, hallucination, minimum length,
    bad-question patterns), re-invoking the LLM with a corrective system
    message whenever a gate fails β†’ post-process the reply text β†’ update
    confidence/phase/understanding.

    Mutates ``session_state`` in place (appends both messages, updates
    confidence, phase and understanding) and returns a GraphState snapshot
    of the turn. Note: each failed gate costs an extra LLM round-trip, and
    a retry is NOT re-checked by the earlier gates.
    """

    # 1. Build messages with intelligence injection
    messages = _build_message_list(session_state, user_message)
    turn_number = _get_turn_number(session_state.messages)

    # 2. Call LLM
    response = await llm.ainvoke(messages)
    parsed = _parse_llm_output(response.content)

    # 3. Anti-repetition check β€” overlap is a similarity score vs. prior replies
    previous_replies = _get_previous_bot_responses(session_state.messages)
    overlap = _check_repetition(parsed["reply"], previous_replies)

    if overlap > 0.35 and previous_replies:
        anti_repeat_msg = SystemMessage(content=(
            "⚠️ YOUR RESPONSE IS TOO SIMILAR TO A PREVIOUS ONE. The user will see "
            "repeated content. Write something COMPLETELY DIFFERENT:\n"
            "- Different opening, different observations, different questions\n"
            "- Share a NEW insight you haven't mentioned\n"
            "- Ask about a DIFFERENT topic entirely"
        ))
        # insert(-1, ...) places the corrective note just before the user's message
        messages.insert(-1, anti_repeat_msg)
        response = await llm.ainvoke(messages)
        parsed = _parse_llm_output(response.content)

    # 3.5. HARD question limit enforcement β€” max 2 questions (text surgery, no LLM call)
    reply = parsed["reply"]
    question_count = reply.count("?")
    if question_count > 2:
        # Keep only the first 2 questions, remove the rest
        parts = reply.split("?")
        if len(parts) > 3:
            # Keep everything up to and including the 2nd "?"
            kept = "?".join(parts[:2]) + "?"
            # Check if there's meaningful non-question content after
            remaining = "?".join(parts[2:])
            remaining_lines = remaining.split("\n")
            non_question_lines = [
                line for line in remaining_lines
                if line.strip() and "?" not in line
            ]
            if non_question_lines:
                kept += "\n" + "\n".join(non_question_lines)
            parsed["reply"] = kept.strip()

    # 3.6. Anti-hallucination check β€” retry once if ungrounded figures appear
    if _check_hallucination(parsed["reply"], session_state.messages):
        anti_hallucinate_msg = SystemMessage(content=(
            "⚠️ YOUR RESPONSE CONTAINS SPECIFIC NUMBERS, AMOUNTS, OR PERCENTAGES "
            "THAT THE CLIENT NEVER MENTIONED. This is hallucination.\n"
            "REWRITE your response:\n"
            "- Remove ALL specific numbers/amounts that the client did not explicitly state\n"
            "- If you need numbers, ASK the client instead of guessing\n"
            "- Only reference facts the client actually told you"
        ))
        messages.insert(-1, anti_hallucinate_msg)
        response = await llm.ainvoke(messages)
        parsed = _parse_llm_output(response.content)

    # 3.7. Minimum response length check β€” replies must be substantive
    # (Run BEFORE bad question check so expanded responses get checked too)
    reply_text = parsed["reply"]
    # Length is measured on non-question lines only β€” questions don't count
    # toward "substance".
    non_q_lines = [line for line in reply_text.split("\n") if "?" not in line]
    non_q_content = "\n".join(non_q_lines).strip()
    if len(non_q_content) < 200:
        expand_msg = SystemMessage(content=(
            "⚠️ Your response is TOO SHORT. The client is paying for expert analysis.\n"
            "EXPAND your response to include:\n"
            "- 1-2 opening sentences reacting to what they said\n"
            "- A **bold header** like '**Key Observations**' followed by 3-4 bullet points\n"
            "  with specific insights, industry knowledge, or patterns you've noticed\n"
            "- 1-2 sentences of concrete recommendation or context\n"
            "- THEN your question(s)\n"
            "Total response before questions should be at least 5-8 sentences."
        ))
        messages.insert(-1, expand_msg)
        response = await llm.ainvoke(messages)
        parsed = _parse_llm_output(response.content)

    # 3.8. Bad question pattern check (self-solving, cause-suggesting)
    # Runs AFTER length expansion so expanded responses are also checked
    correction = _detect_bad_question_patterns(parsed["reply"])
    if correction:
        correction_msg = SystemMessage(content=correction)
        messages.insert(-1, correction_msg)
        response = await llm.ainvoke(messages)
        parsed = _parse_llm_output(response.content)

    # 3.9. Strip leading filler sentences (no LLM call)
    parsed["reply"] = _strip_filler_sentences(parsed["reply"])

    # 3.10. Break wall-of-text responses into readable paragraphs
    parsed["reply"] = _break_wall_of_text(parsed["reply"])

    # 3.11. Ensure questions are properly formatted for frontend box styling
    parsed["reply"] = _format_questions(parsed["reply"])

    # 4. Confidence progression β€” dampened: at most +0.15/turn upward,
    # and never decreasing (a lower LLM estimate still yields +0.06).
    MAX_INCREASE = 0.15
    MIN_INCREASE = 0.06
    llm_confidence = min(parsed["confidence"], 1.0)

    if llm_confidence > session_state.confidence:
        new_confidence = session_state.confidence + min(
            llm_confidence - session_state.confidence, MAX_INCREASE
        )
    else:
        new_confidence = min(session_state.confidence + MIN_INCREASE, 1.0)

    # 5. Route phase
    new_phase = _determine_phase(new_confidence)

    # Phase forcing β€” use topic-relative turn count (mirrors the forcing in
    # _build_message_list so the stored state matches the prompt used).
    topic_turns = turn_number - session_state.topic_start_turn
    if topic_turns >= 12:
        new_phase = Phase.REFINEMENT
        new_confidence = max(new_confidence, 0.85)
    elif topic_turns >= 10 and new_phase in (Phase.DISCOVERY, Phase.EXPLORATION):
        new_phase = Phase.SOLUTION
        new_confidence = max(new_confidence, 0.65)
    # 6. Update understanding (with validation); keep the old summary when
    # the LLM returned an empty one.
    raw_understanding = parsed["understanding"] if parsed["understanding"] else session_state.understanding
    new_understanding = _validate_understanding(raw_understanding, session_state.messages)

    # 7. Update session state (in place β€” caller persists it)
    session_state.messages.append(Message(role="user", content=user_message))
    session_state.messages.append(Message(role="assistant", content=parsed["reply"]))
    session_state.confidence = new_confidence
    session_state.phase = new_phase
    session_state.understanding = new_understanding

    return GraphState(
        session_state=session_state,
        user_message=user_message,
        assistant_reply=parsed["reply"],
        new_confidence=new_confidence,
        new_understanding=new_understanding,
        new_phase=new_phase,
    )
backend/llm.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Groq LLM setup via LangChain.

Exposes a single module-level ``llm`` instance shared by the whole app.
Fails fast at import time if no usable API key is configured.
"""

from __future__ import annotations

import os

from dotenv import load_dotenv
from langchain_groq import ChatGroq

# Load .env from project root (one directory above this file)
load_dotenv(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".env"))

_GROQ_API_KEY = os.getenv("GROQ_API_KEY", "")

# Reject both a missing key and the unedited template placeholder.
if not _GROQ_API_KEY or _GROQ_API_KEY == "your_groq_api_key_here":
    raise RuntimeError(
        "GROQ_API_KEY is not set. "
        "Please add your key to the .env file in the project root."
    )

# Shared LLM instance – llama-3.3-70b-versatile on Groq for speed + quality.
# Moderate temperature for consistent-but-not-robotic replies; one retry and
# a 30s timeout keep a flaky upstream from hanging a chat turn.
llm = ChatGroq(
    model="llama-3.3-70b-versatile",
    api_key=_GROQ_API_KEY,
    temperature=0.5,
    max_tokens=2048,
    request_timeout=30,
    max_retries=1,
)
backend/models.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Pydantic models for request/response and internal session state."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from enum import Enum
6
+ from typing import Optional
7
+
8
+ from pydantic import BaseModel, Field
9
+
10
+
11
+ # ---------------------------------------------------------------------------
12
+ # Consultation phases
13
+ # ---------------------------------------------------------------------------
14
+
15
class Phase(str, Enum):
    """The five consultation phases, ordered from intake to deliverable.

    Inherits ``str`` so members serialize directly as their string value
    (in API responses and the persisted session JSON).
    """

    DISCOVERY = "discovery"        # learn the basics of the problem
    EXPLORATION = "exploration"    # dig into root cause and data sources
    CONSTRAINTS = "constraints"    # shift toward diagnosis and advice
    SOLUTION = "solution"          # deliver a detailed action plan
    REFINEMENT = "refinement"      # final deliverable / wrap-up
21
+
22
+
23
# Canonical progression order, earliest phase first. Consumers iterate this
# (sometimes reversed) to route a confidence score to a phase.
PHASE_ORDER: list[Phase] = [
    Phase.DISCOVERY,
    Phase.EXPLORATION,
    Phase.CONSTRAINTS,
    Phase.SOLUTION,
    Phase.REFINEMENT,
]

# Confidence thresholds that trigger phase transitions.
# Each phase maps to its (inclusive lower, exclusive upper) confidence band;
# bands tile [0.0, 1.0] in 0.20 steps with no gaps or overlaps.
PHASE_THRESHOLDS: dict[Phase, tuple[float, float]] = {
    Phase.DISCOVERY: (0.0, 0.20),
    Phase.EXPLORATION: (0.20, 0.40),
    Phase.CONSTRAINTS: (0.40, 0.60),
    Phase.SOLUTION: (0.60, 0.80),
    Phase.REFINEMENT: (0.80, 1.00),
}
39
+
40
+
41
+ # ---------------------------------------------------------------------------
42
+ # API models
43
+ # ---------------------------------------------------------------------------
44
+
45
class ChatRequest(BaseModel):
    """Inbound payload for a chat turn."""

    session_id: str = Field(..., description="Unique session identifier")
    # min_length=1 rejects empty messages at the validation layer
    message: str = Field(..., min_length=1, description="User message")
48
+
49
+
50
class ChatResponse(BaseModel):
    """Outbound payload for a chat turn: the reply plus consultation progress."""

    reply: str
    phase: Phase
    confidence: float = Field(ge=0.0, le=1.0)
    understanding: str = Field(default="", description="Extracted understanding so far")
55
+
56
+
57
class SessionInfo(BaseModel):
    """Lightweight session summary (e.g. for a session-listing endpoint)."""

    session_id: str
    phase: Phase
    confidence: float
    message_count: int
    # Timestamps are ISO-8601 strings (matching the storage layer's format)
    created_at: str
    updated_at: str
64
+
65
+
66
class NewSessionResponse(BaseModel):
    """Response to a session-creation request: just the new session's id."""

    session_id: str
68
+
69
+
70
+ # ---------------------------------------------------------------------------
71
+ # Internal state (persisted in SQLite as JSON)
72
+ # ---------------------------------------------------------------------------
73
+
74
class Message(BaseModel):
    """A single turn in the conversation history."""

    role: str  # "user" | "assistant"
    content: str
77
+
78
+
79
class SessionState(BaseModel):
    """Full per-session state, serialized to JSON in the SQLite sessions table."""

    session_id: str = ""
    # Full alternating user/assistant history
    messages: list[Message] = Field(default_factory=list)
    # Cumulative LLM-maintained summary of what the client has stated
    understanding: str = ""
    # Diagnostic confidence in [0.0, 1.0]; drives phase routing
    confidence: float = 0.0
    phase: Phase = Phase.DISCOVERY
    topic_start_turn: int = 0  # Turn when current topic started (for phase forcing)
    # ISO-8601 timestamps, filled in by the storage layer
    created_at: str = ""
    updated_at: str = ""
backend/prompts.py ADDED
@@ -0,0 +1,281 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """System prompts for each consultation phase.
2
+
3
+ DESIGN PRINCIPLES:
4
+ - Keep prompts short β€” the LLM follows fewer rules better
5
+ - Ground every question in what the CLIENT actually said
6
+ - Never let the bot invent facts the client didn't mention
7
+ - Let the LLM generate questions naturally β€” no hardcoded examples
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ # ---------------------------------------------------------------------------
13
+ # Core persona β€” short, strict, grounded
14
+ # ---------------------------------------------------------------------------
15
+
16
+ _PERSONA = """You are an expert business consultant β€” sharp, insightful, warm.
17
+
18
+ STRICT RULES (violating ANY of these = failure):
19
+
20
+ 1. GROUNDING β€” NEVER invent, assume, or state facts the client hasn't told you.
21
+ If you don't know something, ASK β€” don't guess.
22
+
23
+ 2. QUESTION PRIORITY β€” Focus on understanding the PROBLEM first.
24
+ Before asking about operations, team, or processes, make sure you deeply
25
+ understand WHAT is going wrong and WHY. Dig into the problem itself β€” the
26
+ specifics, the timing, the root cause. Operational details come later.
27
+
28
+ 3. ASK SIMPLE QUESTIONS β€” Your questions must be easy for a non-technical person
29
+ to answer. Avoid jargon, technical terms, or metrics they wouldn't know.
30
+ Base your questions naturally on what the client just told you.
31
+
32
+ 4. NEVER ask the client to solve their own problem. YOU are the consultant β€”
33
+ it's YOUR job to diagnose, analyze, and recommend. Don't ask things like
34
+ "what do you think is causing this?" or "how do you think X could help?"
35
+
36
+ 5. Ask EXACTLY 1-2 questions per response. NEVER more than 2 question marks total.
37
+
38
+ 6. BANNED PHRASES β€” NEVER use these (instant failure):
39
+ - "I'm glad you shared" / "I'm glad you told me"
40
+ - "I'd love to learn more" / "I'd love to help"
41
+ - "a challenging issue to tackle"
42
+ - "affecting your business's overall performance"
43
+ - "get your business back on track"
44
+ - "I'm thinking that it might be related to"
45
+ Instead: JUMP STRAIGHT into your insight. Your first sentence should
46
+ contain specific knowledge or a sharp observation.
47
+
48
+ 7. FORMATTING β€” Your response MUST be RICH and well-structured:
49
+
50
+ RESPONSE STRUCTURE (follow this every time):
51
+ a) Start with 1-2 conversational sentences reacting to what they said
52
+ b) Then a **bold observation header** followed by 3-4 bullet points:
53
+ - Each bullet should be a specific insight, observation, or piece of
54
+ context relevant to their situation
55
+ - Draw on industry knowledge, common patterns, or what they've shared
56
+ - Be concrete: mention specific approaches, tools, strategies
57
+ c) Optionally: 1-2 sentences of brief recommendation or industry context
58
+ d) End with your question(s)
59
+
60
+ Your response BEFORE the questions should be at LEAST 5-8 sentences total.
61
+ Short replies with just 2-3 sentences are NOT acceptable.
62
+
63
+ QUESTION FORMAT (critical for display):
64
+ Each question MUST be on its own separate paragraph with a BLANK LINE before it.
65
+ Example:
66
+
67
+ Your conversational text here...
68
+
69
+ **Key Observations**
70
+ - Specific insight about their situation
71
+ - Another observation based on what they shared
72
+ - Industry context or pattern you've noticed
73
+
74
+ Brief recommendation or context...
75
+
76
+ **Your first question here?**
77
+
78
+ **Your second question here?**
79
+
80
+ NEVER put two questions on the same line or consecutive lines without a blank line.
81
+ NEVER use headers like "Key Points:", "Current Understanding:", "Summary:".
82
+
83
+ 8. NEVER list generic causes or possibilities. ASK to find out.
84
+
85
+ 9. When user says "I don't know" β†’ stop asking, give your recommendation instead.
86
+
87
+ 10. MOVE ON after user answers β€” Once the user answers a question, that topic is DONE.
88
+ Do NOT ask follow-up questions about the SAME thing. For example:
89
+ - If they said "spreadsheet" β†’ DO NOT ask "what challenges with your spreadsheet?"
90
+ - If they said "online" β†’ DO NOT ask "how do you sell online?"
91
+ Instead, move to a COMPLETELY DIFFERENT aspect of their business.
92
+
93
+ 11. NEVER repeat content from previous turns.
94
+
95
+ 12. Each question MUST cover a DIFFERENT aspect of the problem. Never ask two
96
+ questions about the same topic. Vary across: operations, finances, customers,
97
+ team, strategy, goals, market, competitors.
98
+
99
+ 13. Be SPECIFIC in advice: name actual methods, steps, approaches. No vague advice.
100
+ """
101
+
102
+ _OUTPUT_FORMAT = """
103
+ Respond with ONLY a JSON object:
104
+ {{
105
+ "reply": "Your markdown response",
106
+ "confidence": <float 0.0-1.0>,
107
+ "understanding": "Cumulative summary of ONLY what the client has explicitly stated. Do NOT add interpretations."
108
+ }}
109
+ Confidence: start 0.0, max +0.10/turn. Output ONLY the JSON.
110
+ """
111
+
112
+ # ---------------------------------------------------------------------------
113
+ # TURN 1 β€” first response
114
+ # ---------------------------------------------------------------------------
115
+
116
+ TURN_1_PROMPT = (
117
+ _PERSONA
118
+ + """
119
+ THIS IS THE USER'S VERY FIRST MESSAGE.
120
+
121
+ YOUR RESPONSE MUST:
122
+ 1. Start with a short, warm reaction to what they said (1 sentence)
123
+ 2. Ask 2 natural follow-up questions about their situation β€” one about their
124
+ business/domain and one about the problem. Keep them simple and conversational.
125
+
126
+ THAT'S IT. Only 2 questions. Both must be **bolded** and on their own line.
127
+ Do NOT lecture. Do NOT list possible causes. Do NOT assume anything.
128
+ Do NOT create any "Key Points" or "Summary" sections.
129
+ """
130
+ + _OUTPUT_FORMAT
131
+ )
132
+
133
+ # ---------------------------------------------------------------------------
134
+ # Phase prompts β€” used from turn 2 onwards
135
+ # ---------------------------------------------------------------------------
136
+
137
+ DISCOVERY_PROMPT = (
138
+ _PERSONA
139
+ + """
140
+ PHASE: Discovery | Confidence: {confidence:.0%}
141
+
142
+ You're still learning about their situation. Your priority is to understand
143
+ the problem deeply before moving to operations or solutions.
144
+
145
+ Ask about what matters most right now β€” the problem itself, the context,
146
+ the specifics. Only ask about operational details (team, revenue, processes)
147
+ when they're directly relevant to understanding the problem.
148
+
149
+ 1-2 natural follow-up questions based on what they just told you.
150
+ """
151
+ + _OUTPUT_FORMAT
152
+ )
153
+
154
+ EXPLORATION_PROMPT = (
155
+ _PERSONA
156
+ + """
157
+ PHASE: Exploration | Confidence: {confidence:.0%}
158
+
159
+ You understand the basics. Now dig into the ROOT CAUSE and their DATA.
160
+
161
+ Two things you MUST learn during exploration:
162
+ 1. ROOT CAUSE β€” Which specific parts are affected, what's changed, what
163
+ they've already tried. Narrow it down.
164
+ 2. DATA β€” Where do they keep track of their business information? Do they
165
+ use Excel, a notebook, software, or nothing at all? This is critical
166
+ because your recommendations depend on what data they have access to.
167
+
168
+ If you haven't asked about their data yet, make ONE of your questions
169
+ about how they track or record their business information.
170
+
171
+ Give brief insights where you can. 1-2 questions max.
172
+ """
173
+ + _OUTPUT_FORMAT
174
+ )
175
+
176
+ CONSTRAINTS_PROMPT = (
177
+ _PERSONA
178
+ + """
179
+ PHASE: Constraints | Confidence: {confidence:.0%}
180
+
181
+ SHIFT to primarily ADVISING. You have enough info.
182
+ - State your diagnosis clearly
183
+ - Propose specific solutions with clear steps
184
+ - Reference ONLY details the client actually shared
185
+ - At most 1 question
186
+
187
+ Be specific and concrete. No vague advice.
188
+ """
189
+ + _OUTPUT_FORMAT
190
+ )
191
+
192
+ SOLUTION_PROMPT = (
193
+ _PERSONA
194
+ + """
195
+ PHASE: Solution | Confidence: {confidence:.0%}
196
+
197
+ You now have enough information to deliver a DETAILED, ACTIONABLE solution.
198
+
199
+ YOUR RESPONSE MUST INCLUDE:
200
+
201
+ 1. **Problem Summary** β€” 2-3 sentences summarizing what you've learned about
202
+ their specific situation (reference details THEY told you).
203
+
204
+ 2. **Root Cause Analysis** β€” What you believe is causing the problem, based on
205
+ what they've shared. Be specific, not generic.
206
+
207
+ 3. **Step-by-Step Action Plan** β€” A NUMBERED list of concrete steps they should
208
+ take, organized by timeline:
209
+ - **Immediate (This Week):** 2-3 quick wins they can start today
210
+ - **Short-Term (Next 2-4 Weeks):** 3-4 tactical changes
211
+ - **Medium-Term (1-3 Months):** 2-3 strategic initiatives
212
+
213
+ Each step must be SPECIFIC: name actual tools, methods, platforms, strategies.
214
+ NOT "improve your marketing" but "Create an Instagram business account and
215
+ post 3-4 high-quality product photos per week with relevant hashtags."
216
+
217
+ 4. **Expected Outcomes** β€” What results they can expect if they follow the plan.
218
+
219
+ Keep this response LONG and DETAILED β€” this is the value they came for.
220
+ At most 1 question to clarify implementation details.
221
+ """
222
+ + _OUTPUT_FORMAT
223
+ )
224
+
225
+ REFINEMENT_PROMPT = (
226
+ _PERSONA
227
+ + """
228
+ PHASE: Final | Confidence: {confidence:.0%}
229
+
230
+ Deliver the COMPLETE, FINAL action plan. This is the most important response
231
+ in the entire conversation β€” make it worth their time.
232
+
233
+ YOUR RESPONSE MUST INCLUDE:
234
+
235
+ 1. **Executive Summary** β€” 3-4 sentences summarizing the problem and your
236
+ recommended approach. Reference specific details they shared.
237
+
238
+ 2. **Detailed Action Pipeline** β€” A comprehensive NUMBERED step-by-step plan:
239
+
240
+ **Phase 1: Immediate Actions (This Week)**
241
+ - Step 1: [Specific action with details]
242
+ - Step 2: [Specific action with details]
243
+ - Step 3: [Specific action with details]
244
+
245
+ **Phase 2: Short-Term Strategy (Weeks 2-4)**
246
+ - Step 4: [Specific action with details]
247
+ - Step 5: [Specific action with details]
248
+
249
+ **Phase 3: Medium-Term Growth (Month 2-3)**
250
+ - Step 6: [Specific action with details]
251
+ - Step 7: [Specific action with details]
252
+
253
+ 3. **Tools & Resources** β€” Specific tools, platforms, or resources they should
254
+ use (name actual products, websites, methods).
255
+
256
+ 4. **Expected Results** β€” What outcomes to expect and rough timelines.
257
+
258
+ 5. **Key Metrics to Track** β€” 3-4 specific numbers they should monitor to
259
+ know if the plan is working.
260
+
261
+ Make this response VERY detailed and thorough. This is the deliverable.
262
+
263
+ End your response by asking:
264
+ "Is there anything else you'd like help with, or any other challenges you're facing?"
265
+ """
266
+ + _OUTPUT_FORMAT
267
+ )
268
+
269
+ # ---------------------------------------------------------------------------
270
+ # Lookup
271
+ # ---------------------------------------------------------------------------
272
+
273
# Deferred import (hence the E402 suppression) β€” presumably to keep the
# prompt constants above free of dependencies; confirm there is no cycle.
from models import Phase  # noqa: E402

# Phase β†’ system prompt used from turn 2 onwards (turn 1 uses TURN_1_PROMPT).
# Each template expects .format(confidence=...) before use.
PHASE_PROMPTS: dict[Phase, str] = {
    Phase.DISCOVERY: DISCOVERY_PROMPT,
    Phase.EXPLORATION: EXPLORATION_PROMPT,
    Phase.CONSTRAINTS: CONSTRAINTS_PROMPT,
    Phase.SOLUTION: SOLUTION_PROMPT,
    Phase.REFINEMENT: REFINEMENT_PROMPT,
}
backend/requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ fastapi==0.115.0
2
+ uvicorn[standard]==0.30.6
3
+ langchain-groq==0.2.4
4
+ langchain-core==0.3.51
5
+ langgraph==0.2.60
6
+ pydantic==2.10.0
7
+ aiosqlite==0.20.0
8
+ python-dotenv==1.0.1
backend/storage.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """SQLite-backed async session storage."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ import os
7
+ from datetime import datetime, timezone
8
+
9
+ import aiosqlite
10
+
11
+ from models import Message, Phase, SessionState
12
+
13
+ DB_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "data")
14
+ DB_PATH = os.path.join(DB_DIR, "sessions.db")
15
+
16
+ # ---------------------------------------------------------------------------
17
+ # DB initialisation
18
+ # ---------------------------------------------------------------------------
19
+
20
async def init_db() -> None:
    """Ensure the data directory and the ``sessions`` table exist.

    Safe to call on every startup: both the directory creation and the DDL
    are idempotent (``exist_ok`` / ``IF NOT EXISTS``).
    """
    os.makedirs(DB_DIR, exist_ok=True)
    ddl = """
            CREATE TABLE IF NOT EXISTS sessions (
                session_id TEXT PRIMARY KEY,
                state_json TEXT NOT NULL,
                created_at TEXT NOT NULL,
                updated_at TEXT NOT NULL
            )
            """
    async with aiosqlite.connect(DB_PATH) as db:
        await db.execute(ddl)
        await db.commit()
35
+
36
+
37
+ # ---------------------------------------------------------------------------
38
+ # CRUD helpers
39
+ # ---------------------------------------------------------------------------
40
+
41
+ def _now() -> str:
42
+ return datetime.now(timezone.utc).isoformat()
43
+
44
+
45
async def load_session(session_id: str) -> SessionState | None:
    """Fetch and deserialise one session; return None for an unknown id."""
    async with aiosqlite.connect(DB_PATH) as db:
        cursor = await db.execute(
            "SELECT state_json FROM sessions WHERE session_id = ?",
            (session_id,),
        )
        row = await cursor.fetchone()
    if row is None:
        return None
    # Row holds the JSON-serialised SessionState; rebuild the model from it.
    payload = json.loads(row[0])
    return SessionState(**payload)
57
+
58
+
59
async def save_session(session_id: str, state: SessionState) -> None:
    """Upsert a session row, stamping ``updated_at`` (and ``created_at`` once).

    Uses SQLite's ``ON CONFLICT ... DO UPDATE`` so inserts and updates share
    one statement; ``created_at`` is only written on first insert.
    """
    timestamp = _now()
    state.updated_at = timestamp
    # First save: record the creation time as well.
    if not state.created_at:
        state.created_at = timestamp
    serialized = state.model_dump_json()
    params = (session_id, serialized, state.created_at, timestamp)
    async with aiosqlite.connect(DB_PATH) as db:
        await db.execute(
            """
            INSERT INTO sessions (session_id, state_json, created_at, updated_at)
            VALUES (?, ?, ?, ?)
            ON CONFLICT(session_id)
            DO UPDATE SET state_json = excluded.state_json,
                          updated_at = excluded.updated_at
            """,
            params,
        )
        await db.commit()
78
+
79
+
80
async def list_sessions() -> list[dict]:
    """Summarise every stored session (id, phase, confidence, counts, times)."""
    async with aiosqlite.connect(DB_PATH) as db:
        cursor = await db.execute(
            "SELECT session_id, state_json, created_at, updated_at FROM sessions ORDER BY updated_at DESC"
        )
        rows = await cursor.fetchall()

    def summarise(row) -> dict:
        # Decode the stored state JSON just enough to expose headline fields.
        sid, raw_state, created, updated = row
        payload = json.loads(raw_state)
        return {
            "session_id": sid,
            "phase": payload.get("phase", "discovery"),
            "confidence": payload.get("confidence", 0.0),
            "message_count": len(payload.get("messages", [])),
            "created_at": created,
            "updated_at": updated,
        }

    return [summarise(r) for r in rows]
102
+
103
+
104
async def delete_session(session_id: str) -> bool:
    """Remove a session row; report whether a row was actually deleted."""
    async with aiosqlite.connect(DB_PATH) as db:
        result = await db.execute(
            "DELETE FROM sessions WHERE session_id = ?", (session_id,)
        )
        await db.commit()
        # rowcount reflects how many rows the DELETE touched (0 or 1 here).
        removed = result.rowcount > 0
    return removed
frontend/app.js ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* ═══════════════════════════════════════════════════════════════
2
+ ConsultAI β€” Chat Client (Vanilla JS)
3
+ ═══════════════════════════════════════════════════════════════ */
4
+
5
+ const API_BASE = window.location.origin;
6
+
7
+ // ─── State ────────────────────────────────────────────────────
8
+ let currentSessionId = null;
9
+ let isWaiting = false;
10
+
11
+ // ─── DOM refs ─────────────────────────────────────────────────
12
+ const $messages = document.getElementById("messages");
13
+ const $msgContainer = document.getElementById("messagesContainer");
14
+ const $input = document.getElementById("userInput");
15
+ const $btnSend = document.getElementById("btnSend");
16
+ const $btnNewChat = document.getElementById("btnNewChat");
17
+ const $btnToggle = document.getElementById("btnToggleSidebar");
18
+ const $sidebar = document.getElementById("sidebar");
19
+ const $sessionList = document.getElementById("sessionList");
20
+ const $welcomeScreen = document.getElementById("welcomeScreen");
21
+
22
+ // ─── UUID generator ───────────────────────────────────────────
23
// Generate an RFC-4122-shaped v4 UUID from Math.random — fine for
// client-side session ids, not for anything security-sensitive.
function uuid() {
    const template = "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx";
    return template.replace(/[xy]/g, ch => {
        const nibble = (Math.random() * 16) | 0;
        const value = ch === "x" ? nibble : (nibble & 0x3) | 0x8;
        return value.toString(16);
    });
}
29
+
30
+ // ─── Simple markdown β†’ HTML ───────────────────────────────────
31
// ─── Simple markdown → HTML ───────────────────────────────────
// Convert a small markdown subset to HTML for assistant bubbles.
// SECURITY: the result is assigned to innerHTML, so the raw text is
// entity-escaped FIRST — only the tags generated below can reach the DOM.
// Supports: fenced/inline code, bold, italic, h1–h3, blockquote, lists,
// paragraph wrapping, and highlighting of question lines.
function renderMarkdown(text) {
    if (!text) return "";
    // Escape raw HTML before any transformation (XSS guard).
    let html = text
        .replace(/&/g, '&amp;')
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;');
    html = html
        // Code blocks
        .replace(/```(\w*)\n([\s\S]*?)```/g, '<pre><code>$2</code></pre>')
        // Inline code
        .replace(/`([^`]+)`/g, '<code>$1</code>')
        // Bold
        .replace(/\*\*(.+?)\*\*/g, '<strong>$1</strong>')
        // Italic
        .replace(/\*(.+?)\*/g, '<em>$1</em>')
        // Headers
        .replace(/^### (.+)$/gm, '<h3>$1</h3>')
        .replace(/^## (.+)$/gm, '<h2>$1</h2>')
        .replace(/^# (.+)$/gm, '<h1>$1</h1>')
        // Blockquote ("> " is now "&gt; " after escaping)
        .replace(/^&gt; (.+)$/gm, '<blockquote>$1</blockquote>')
        // Unordered list items
        .replace(/^[-*] (.+)$/gm, '<li>$1</li>')
        // Numbered list items
        .replace(/^\d+\. (.+)$/gm, '<li>$1</li>');

    // Wrap consecutive <li> in <ul>
    html = html.replace(/((?:<li>.*<\/li>\n?)+)/g, '<ul>$1</ul>');
    // Paragraphs (lines not already wrapped in block elements).
    // Inline-only lines (e.g. <strong>, <em>) still get a <p>.
    html = html.replace(/^(?!<[hupbol]|<li|<code|<pre|<block|<ul|<div)(.+)$/gm, '<p>$1</p>');
    // Clean up extra newlines
    html = html.replace(/\n{2,}/g, '\n');

    // Highlight questions — wrap <li> or <p> that end with "?" in a styled box
    html = html.replace(
        /<li>(.*?\?)\s*<\/li>/g,
        '<li class="question-item">$1</li>'
    );
    html = html.replace(
        /<p>(.*?\?)\s*<\/p>/g,
        '<div class="question-highlight"><span class="question-icon">❓</span><p>$1</p></div>'
    );

    return html;
}
73
+
74
+
75
+
76
+ // ─── Add message to chat ──────────────────────────────────────
77
// Append one chat bubble (avatar + content) to the transcript and scroll
// it into view.  role is "user" or "assistant"; assistant content is
// rendered as markdown, user content is HTML-escaped verbatim.
function addMessage(role, content) {
    // Hide welcome screen once the first real message appears.
    if ($welcomeScreen) $welcomeScreen.style.display = "none";

    const div = document.createElement("div");
    div.className = `message ${role}-message`;

    const avatar = document.createElement("div");
    avatar.className = "message-avatar";
    avatar.textContent = role === "user" ? "U" : "◆";

    const bubble = document.createElement("div");
    bubble.className = "message-bubble";
    // Assistant text goes through the markdown renderer; user text is
    // escaped so typed HTML is displayed literally, not interpreted.
    bubble.innerHTML = role === "assistant" ? renderMarkdown(content) : escapeHtml(content);

    div.appendChild(avatar);
    div.appendChild(bubble);
    $messages.appendChild(div);
    scrollToBottom();
}
97
+
98
// HTML-escape arbitrary text using a throwaway element (the browser does
// the entity encoding), then turn newlines into <br> for display.
function escapeHtml(text) {
    const scratch = document.createElement("div");
    scratch.textContent = text;
    const escaped = scratch.innerHTML;
    return escaped.replace(/\n/g, "<br>");
}
103
+
104
// Scroll the message container to its bottom on the next animation frame
// (deferred so freshly appended nodes are laid out before we measure).
function scrollToBottom() {
    requestAnimationFrame(() => {
        $msgContainer.scrollTop = $msgContainer.scrollHeight;
    });
}
109
+
110
+ // ─── Typing indicator ─────────────────────────────────────────
111
// ─── Typing indicator ─────────────────────────────────────────
// Show the animated "assistant is typing" bubble at the end of the
// transcript.  Removed later by hideTyping() via its fixed element id.
function showTyping() {
    const div = document.createElement("div");
    div.className = "typing-indicator";
    div.id = "typingIndicator";
    div.innerHTML = `
        <div class="message-avatar" style="background:var(--accent-gradient);color:white;width:36px;height:36px;border-radius:50%;display:flex;align-items:center;justify-content:center;font-size:14px;font-weight:700;">◆</div>
        <div class="typing-dots"><span></span><span></span><span></span></div>
    `;
    $messages.appendChild(div);
    scrollToBottom();
}
122
+
123
// Remove the typing-indicator bubble, if one is currently shown.
function hideTyping() {
    const indicator = document.getElementById("typingIndicator");
    if (indicator) indicator.remove();
}
127
+
128
+ // ─── API calls ────────────────────────────────────────────────
129
+ // ─── Create session via API & show greeting ──────────────────
130
// ─── Create session via API & show greeting ──────────────────
// Ask the backend for a fresh session, reset the transcript, and display
// the server-provided greeting.  Falls back to a locally generated id when
// the server is unreachable.  Returns the new session id.
function createNewSession() { }  // (doc stub — real definition below)
async function createNewSession() {
    try {
        const res = await fetch(`${API_BASE}/sessions/new`, { method: "POST" });
        const data = await res.json();
        currentSessionId = data.session_id;

        // Clear transcript but keep the (hidden) welcome node attached so
        // it can be re-shown after a delete.
        $messages.innerHTML = "";
        if ($welcomeScreen) {
            $messages.appendChild($welcomeScreen);
            $welcomeScreen.style.display = "none";
        }
        addMessage("assistant", data.greeting);
        loadSessions();
        return data.session_id;
    } catch {
        // Fallback: generate local id so chatting can still proceed.
        currentSessionId = uuid();
        return currentSessionId;
    }
}
151
+
152
// Post one user message to /chat and render the reply.  Guards against
// empty input and concurrent sends via the isWaiting flag; the flag and
// the typing indicator are toggled in a strict order around the request.
async function sendMessage(message) {
    if (!message.trim() || isWaiting) return;

    // Auto-create session if none exists yet.
    if (!currentSessionId) {
        await createNewSession();
    }

    addMessage("user", message);
    $input.value = "";
    $input.style.height = "auto";
    $btnSend.disabled = true;
    isWaiting = true;
    showTyping();

    try {
        const res = await fetch(`${API_BASE}/chat`, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify({ session_id: currentSessionId, message }),
        });
        if (!res.ok) {
            // Surface the server's detail message when available.
            const err = await res.json().catch(() => ({}));
            throw new Error(err.detail || `HTTP ${res.status}`);
        }
        const data = await res.json();
        hideTyping();
        addMessage("assistant", data.reply);
        loadSessions(); // refresh sidebar
    } catch (err) {
        hideTyping();
        addMessage("assistant", `⚠️ **Error:** ${err.message}\n\nPlease check that the server is running and try again.`);
    } finally {
        // Always release the lock, even on error / early throw.
        isWaiting = false;
        $btnSend.disabled = !$input.value.trim();
    }
}
189
+
190
// Refresh the sidebar with the latest session summaries from the server.
// Failures are swallowed — the backend may simply not be up yet.
async function loadSessions() {
    try {
        const response = await fetch(`${API_BASE}/sessions`);
        const payload = await response.json();
        renderSessionList(payload.sessions || []);
    } catch {
        /* server might not be ready */
    }
}
197
+
198
// Fetch a stored session's transcript and re-render it in the chat pane,
// making that session the active one.  Silently ignores fetch errors and
// unknown session ids.
async function loadSessionHistory(sessionId) {
    try {
        const res = await fetch(`${API_BASE}/sessions/${sessionId}`);
        if (!res.ok) return;
        const data = await res.json();

        // Clear messages (keep the hidden welcome node attached).
        $messages.innerHTML = "";
        if ($welcomeScreen) {
            $messages.appendChild($welcomeScreen);
            $welcomeScreen.style.display = "none";
        }

        // Re-render history in original order.
        (data.messages || []).forEach(m => addMessage(m.role, m.content));

        currentSessionId = sessionId;
        highlightActiveSession();
    } catch { /* ignore */ }
}
218
+
219
// Delete a session on the server; if it was the active one, clear the
// transcript and bring back the welcome screen.  Sidebar is refreshed
// regardless.
async function deleteSessionById(sessionId) {
    try {
        await fetch(`${API_BASE}/sessions/${sessionId}`, { method: "DELETE" });
        if (currentSessionId === sessionId) {
            currentSessionId = null;
            $messages.innerHTML = "";
            if ($welcomeScreen) {
                $messages.appendChild($welcomeScreen);
                // Empty string restores the stylesheet's default display.
                $welcomeScreen.style.display = "";
            }
        }
        loadSessions();
    } catch { /* ignore */ }
}
233
+
234
+ // ─── Session list rendering ──────────────────────────────────
235
// ─── Session list rendering ──────────────────────────────────
// Rebuild the sidebar from scratch: one row per session with a label
// (click = load history) and a delete button (click = delete, without
// also triggering the row's load handler).
function renderSessionList(sessions) {
    $sessionList.innerHTML = "";
    sessions.forEach(s => {
        const div = document.createElement("div");
        div.className = "session-item" + (s.session_id === currentSessionId ? " active" : "");
        div.innerHTML = `
            <span class="session-item-label">Session · ${s.message_count || 0} msgs</span>
            <button class="session-item-delete" title="Delete">×</button>
        `;
        div.querySelector(".session-item-label").addEventListener("click", () => {
            loadSessionHistory(s.session_id);
        });
        div.querySelector(".session-item-delete").addEventListener("click", e => {
            // Keep the click from bubbling to the label/load handler.
            e.stopPropagation();
            deleteSessionById(s.session_id);
        });
        $sessionList.appendChild(div);
    });
}
254
+
255
// Clear the "active" marker from all sidebar rows.  Intentionally does
// not re-apply it here: rows carry no session-id attribute to match on,
// so the next renderSessionList() pass (via loadSessions) re-highlights.
function highlightActiveSession() {
    document.querySelectorAll(".session-item").forEach(el => el.classList.remove("active"));
    // Not trivial to match back — rely on re-render from loadSessions
}
259
+
260
+ // ─── New chat ─────────────────────────────────────────────────
261
// ─── New chat ─────────────────────────────────────────────────
// Drop the current session, start a fresh one, and focus the input box.
async function startNewChat() {
    currentSessionId = null;
    await createNewSession();
    $input.focus();
}
266
+
267
// ─── Event listeners ──────────────────────────────────────────
$btnSend.addEventListener("click", () => sendMessage($input.value));

$input.addEventListener("input", () => {
    // Send is enabled only when there is text and no request in flight.
    $btnSend.disabled = !$input.value.trim() || isWaiting;
    // Auto-resize the textarea up to a max height of 150px.
    $input.style.height = "auto";
    $input.style.height = Math.min($input.scrollHeight, 150) + "px";
});

$input.addEventListener("keydown", e => {
    // Enter sends; Shift+Enter inserts a newline.
    if (e.key === "Enter" && !e.shiftKey) {
        e.preventDefault();
        sendMessage($input.value);
    }
});

$btnNewChat.addEventListener("click", startNewChat);

$btnToggle.addEventListener("click", () => {
    $sidebar.classList.toggle("hidden");
});

// Suggestion chips: clicking one sends its canned prompt immediately.
document.querySelectorAll(".suggestion-chip").forEach(chip => {
    chip.addEventListener("click", () => {
        const msg = chip.getAttribute("data-msg");
        sendMessage(msg);
    });
});

// ─── Init ─────────────────────────────────────────────────────
loadSessions();
createNewSession(); // auto-start first session with greeting
frontend/index.html ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+
4
+ <head>
5
+ <meta charset="UTF-8" />
6
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
7
+ <title>ConsultAI β€” Intelligent Problem-Solving</title>
8
+ <meta name="description"
9
+ content="AI-powered consultant chatbot for structured problem-solving across any domain." />
10
+ <link rel="preconnect" href="https://fonts.googleapis.com" />
11
+ <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
12
+ <link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap"
13
+ rel="stylesheet" />
14
+ <link rel="stylesheet" href="/static/style.css" />
15
+ </head>
16
+
17
+ <body>
18
+ <!-- Sidebar -->
19
+ <aside class="sidebar" id="sidebar">
20
+ <div class="sidebar-header">
21
+ <div class="logo">
22
+ <span class="logo-icon">β—†</span>
23
+ <span class="logo-text">ConsultAI</span>
24
+ </div>
25
+ <button class="btn-new-chat" id="btnNewChat" title="New Consultation">
26
+ <svg width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
27
+ <line x1="12" y1="5" x2="12" y2="19" />
28
+ <line x1="5" y1="12" x2="19" y2="12" />
29
+ </svg>
30
+ </button>
31
+ </div>
32
+ <div class="session-list" id="sessionList">
33
+ <!-- Sessions populated by JS -->
34
+ </div>
35
+ <div class="sidebar-footer">
36
+ <div class="sidebar-footer-text">Powered by Groq + LangGraph</div>
37
+ </div>
38
+ </aside>
39
+
40
+ <!-- Main chat area -->
41
+ <main class="main-area">
42
+ <!-- Top bar -->
43
+ <header class="topbar">
44
+ <button class="btn-toggle-sidebar" id="btnToggleSidebar" title="Toggle sidebar">
45
+ <svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
46
+ <line x1="3" y1="6" x2="21" y2="6" />
47
+ <line x1="3" y1="12" x2="21" y2="12" />
48
+ <line x1="3" y1="18" x2="21" y2="18" />
49
+ </svg>
50
+ </button>
51
+ <div class="topbar-title">ConsultAI</div>
52
+ </header>
53
+
54
+ <!-- Messages -->
55
+ <div class="messages-container" id="messagesContainer">
56
+ <div class="messages" id="messages">
57
+ <!-- Welcome -->
58
+ <div class="welcome-screen" id="welcomeScreen">
59
+ <div class="welcome-icon">β—†</div>
60
+ <h1>Welcome to ConsultAI</h1>
61
+ <p>I'm your AI consultant. Describe any problem or challenge, and I'll guide you through a
62
+ structured analysis to find the best solution.</p>
63
+ <div class="welcome-suggestions">
64
+ <button class="suggestion-chip"
65
+ data-msg="I'm building a SaaS product and struggling with user retention after the free trial ends.">SaaS
66
+ Retention</button>
67
+ <button class="suggestion-chip"
68
+ data-msg="Our supply chain has major inefficiencies and we're losing money on logistics.">Supply
69
+ Chain</button>
70
+ <button class="suggestion-chip"
71
+ data-msg="I want to launch an online education platform but I'm not sure about the best approach.">EdTech
72
+ Launch</button>
73
+ </div>
74
+ </div>
75
+ </div>
76
+ </div>
77
+
78
+ <!-- Input -->
79
+ <div class="input-area">
80
+ <div class="input-wrapper">
81
+ <textarea id="userInput" placeholder="Describe your problem or respond to my questions…" rows="1"
82
+ autofocus></textarea>
83
+ <button class="btn-send" id="btnSend" title="Send message" disabled>
84
+ <svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
85
+ <line x1="22" y1="2" x2="11" y2="13" />
86
+ <polygon points="22 2 15 22 11 13 2 9 22 2" />
87
+ </svg>
88
+ </button>
89
+ </div>
90
+ <div class="input-hint">Press Enter to send Β· Shift+Enter for new line</div>
91
+ </div>
92
+ </main>
93
+
94
+ <script src="/static/app.js"></script>
95
+ </body>
96
+
97
+ </html>
frontend/style.css ADDED
@@ -0,0 +1,691 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* ═══════════════════════════════════════════════════════════════
2
+ ConsultAI β€” Premium Dark-Mode Chat UI
3
+ ═══════════════════════════════════════════════════════════════ */
4
+
5
+ /* --- CSS Variables / Design Tokens --- */
6
+ :root {
7
+ --bg-primary: #0a0e1a;
8
+ --bg-secondary: #111827;
9
+ --bg-tertiary: #1a2038;
10
+ --bg-surface: rgba(255, 255, 255, 0.04);
11
+ --bg-glass: rgba(255, 255, 255, 0.06);
12
+ --bg-glass-hover: rgba(255, 255, 255, 0.10);
13
+
14
+ --text-primary: #f0f2f5;
15
+ --text-secondary: #9ca3af;
16
+ --text-muted: #6b7280;
17
+
18
+ --accent-primary: #6366f1;
19
+ --accent-secondary: #818cf8;
20
+ --accent-glow: rgba(99, 102, 241, 0.25);
21
+ --accent-gradient: linear-gradient(135deg, #6366f1 0%, #8b5cf6 50%, #a78bfa 100%);
22
+
23
+ --phase-discovery: #3b82f6;
24
+ --phase-exploration: #8b5cf6;
25
+ --phase-constraints: #f59e0b;
26
+ --phase-solution: #10b981;
27
+ --phase-refinement: #06d6a0;
28
+
29
+ --border-color: rgba(255, 255, 255, 0.08);
30
+ --border-accent: rgba(99, 102, 241, 0.3);
31
+
32
+ --shadow-sm: 0 1px 2px rgba(0, 0, 0, 0.3);
33
+ --shadow-md: 0 4px 12px rgba(0, 0, 0, 0.4);
34
+ --shadow-lg: 0 8px 32px rgba(0, 0, 0, 0.5);
35
+ --shadow-glow: 0 0 20px var(--accent-glow);
36
+
37
+ --radius-sm: 8px;
38
+ --radius-md: 12px;
39
+ --radius-lg: 16px;
40
+ --radius-xl: 20px;
41
+
42
+ --sidebar-width: 280px;
43
+ --topbar-height: 60px;
44
+
45
+ --font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
46
+ --transition-fast: 150ms cubic-bezier(0.4, 0, 0.2, 1);
47
+ --transition-normal: 250ms cubic-bezier(0.4, 0, 0.2, 1);
48
+ --transition-slow: 400ms cubic-bezier(0.4, 0, 0.2, 1);
49
+ }
50
+
51
+ /* --- Reset --- */
52
+ *,
53
+ *::before,
54
+ *::after {
55
+ box-sizing: border-box;
56
+ margin: 0;
57
+ padding: 0;
58
+ }
59
+
60
+ html {
61
+ font-size: 16px;
62
+ -webkit-font-smoothing: antialiased;
63
+ -moz-osx-font-smoothing: grayscale;
64
+ }
65
+
66
+ body {
67
+ font-family: var(--font-family);
68
+ background: var(--bg-primary);
69
+ color: var(--text-primary);
70
+ display: flex;
71
+ height: 100vh;
72
+ overflow: hidden;
73
+ }
74
+
75
+ /* --- Scrollbar --- */
76
+ ::-webkit-scrollbar {
77
+ width: 6px;
78
+ }
79
+
80
+ ::-webkit-scrollbar-track {
81
+ background: transparent;
82
+ }
83
+
84
+ ::-webkit-scrollbar-thumb {
85
+ background: rgba(255, 255, 255, 0.12);
86
+ border-radius: 3px;
87
+ }
88
+
89
+ ::-webkit-scrollbar-thumb:hover {
90
+ background: rgba(255, 255, 255, 0.2);
91
+ }
92
+
93
+ /* ═══════════════════════════════════════════════════
94
+ SIDEBAR
95
+ ═══════════════════════════════════════════════════ */
96
+ .sidebar {
97
+ width: var(--sidebar-width);
98
+ min-width: var(--sidebar-width);
99
+ background: var(--bg-secondary);
100
+ border-right: 1px solid var(--border-color);
101
+ display: flex;
102
+ flex-direction: column;
103
+ transition: transform var(--transition-normal), opacity var(--transition-normal);
104
+ z-index: 100;
105
+ }
106
+
107
+ .sidebar.hidden {
108
+ transform: translateX(-100%);
109
+ position: absolute;
110
+ height: 100%;
111
+ opacity: 0;
112
+ pointer-events: none;
113
+ }
114
+
115
+ .sidebar-header {
116
+ padding: 16px 20px;
117
+ display: flex;
118
+ align-items: center;
119
+ justify-content: space-between;
120
+ border-bottom: 1px solid var(--border-color);
121
+ }
122
+
123
+ .logo {
124
+ display: flex;
125
+ align-items: center;
126
+ gap: 10px;
127
+ }
128
+
129
+ .logo-icon {
130
+ font-size: 22px;
131
+ background: var(--accent-gradient);
132
+ -webkit-background-clip: text;
133
+ -webkit-text-fill-color: transparent;
134
+ background-clip: text;
135
+ }
136
+
137
+ .logo-text {
138
+ font-size: 18px;
139
+ font-weight: 700;
140
+ letter-spacing: -0.02em;
141
+ }
142
+
143
+ .btn-new-chat {
144
+ background: var(--bg-glass);
145
+ border: 1px solid var(--border-color);
146
+ color: var(--text-primary);
147
+ border-radius: var(--radius-sm);
148
+ padding: 8px;
149
+ cursor: pointer;
150
+ transition: all var(--transition-fast);
151
+ display: flex;
152
+ align-items: center;
153
+ justify-content: center;
154
+ }
155
+
156
+ .btn-new-chat:hover {
157
+ background: var(--accent-primary);
158
+ border-color: var(--accent-primary);
159
+ box-shadow: var(--shadow-glow);
160
+ }
161
+
162
+ .session-list {
163
+ flex: 1;
164
+ overflow-y: auto;
165
+ padding: 8px;
166
+ }
167
+
168
+ .session-item {
169
+ padding: 10px 14px;
170
+ border-radius: var(--radius-sm);
171
+ cursor: pointer;
172
+ transition: all var(--transition-fast);
173
+ display: flex;
174
+ align-items: center;
175
+ justify-content: space-between;
176
+ margin-bottom: 2px;
177
+ font-size: 13px;
178
+ color: var(--text-secondary);
179
+ }
180
+
181
+ .session-item:hover {
182
+ background: var(--bg-glass-hover);
183
+ color: var(--text-primary);
184
+ }
185
+
186
+ .session-item.active {
187
+ background: var(--accent-glow);
188
+ color: var(--text-primary);
189
+ border: 1px solid var(--border-accent);
190
+ }
191
+
192
+ .session-item-label {
193
+ overflow: hidden;
194
+ text-overflow: ellipsis;
195
+ white-space: nowrap;
196
+ flex: 1;
197
+ }
198
+
199
+ .session-item-delete {
200
+ opacity: 0;
201
+ background: none;
202
+ border: none;
203
+ color: var(--text-muted);
204
+ cursor: pointer;
205
+ padding: 2px 4px;
206
+ font-size: 16px;
207
+ line-height: 1;
208
+ transition: all var(--transition-fast);
209
+ }
210
+
211
+ .session-item:hover .session-item-delete {
212
+ opacity: 1;
213
+ }
214
+
215
+ .session-item-delete:hover {
216
+ color: #ef4444;
217
+ }
218
+
219
+ .sidebar-footer {
220
+ padding: 12px 20px;
221
+ border-top: 1px solid var(--border-color);
222
+ }
223
+
224
+ .sidebar-footer-text {
225
+ font-size: 11px;
226
+ color: var(--text-muted);
227
+ text-align: center;
228
+ }
229
+
230
+ /* ═══════════════════════════════════════════════════
231
+ MAIN AREA
232
+ ═══════════════════════════════════════════════════ */
233
+ .main-area {
234
+ flex: 1;
235
+ display: flex;
236
+ flex-direction: column;
237
+ min-width: 0;
238
+ position: relative;
239
+ }
240
+
241
+ /* --- Top Bar --- */
242
+ .topbar {
243
+ height: var(--topbar-height);
244
+ padding: 0 20px;
245
+ display: flex;
246
+ align-items: center;
247
+ gap: 16px;
248
+ border-bottom: 1px solid var(--border-color);
249
+ background: rgba(10, 14, 26, 0.8);
250
+ backdrop-filter: blur(12px);
251
+ -webkit-backdrop-filter: blur(12px);
252
+ z-index: 10;
253
+ }
254
+
255
+ .btn-toggle-sidebar {
256
+ background: none;
257
+ border: none;
258
+ color: var(--text-secondary);
259
+ cursor: pointer;
260
+ padding: 6px;
261
+ border-radius: var(--radius-sm);
262
+ display: flex;
263
+ transition: all var(--transition-fast);
264
+ }
265
+
266
+ .btn-toggle-sidebar:hover {
267
+ background: var(--bg-glass);
268
+ color: var(--text-primary);
269
+ }
270
+
271
+ .topbar-title {
272
+ font-size: 16px;
273
+ font-weight: 600;
274
+ color: var(--text-primary);
275
+ letter-spacing: -0.01em;
276
+ }
277
+
278
+ /* ═══════════════════════════════════════════════════
279
+ MESSAGES
280
+ ═══════════════════════════════════════════════════ */
281
+ .messages-container {
282
+ flex: 1;
283
+ overflow-y: auto;
284
+ scroll-behavior: smooth;
285
+ }
286
+
287
+ .messages {
288
+ max-width: 820px;
289
+ margin: 0 auto;
290
+ padding: 24px 20px 100px;
291
+ }
292
+
293
+ /* --- Welcome Screen --- */
294
+ .welcome-screen {
295
+ display: flex;
296
+ flex-direction: column;
297
+ align-items: center;
298
+ justify-content: center;
299
+ text-align: center;
300
+ padding: 80px 20px 40px;
301
+ animation: fadeInUp 0.6s ease-out;
302
+ }
303
+
304
+ .welcome-icon {
305
+ font-size: 48px;
306
+ background: var(--accent-gradient);
307
+ -webkit-background-clip: text;
308
+ -webkit-text-fill-color: transparent;
309
+ background-clip: text;
310
+ margin-bottom: 16px;
311
+ }
312
+
313
+ .welcome-screen h1 {
314
+ font-size: 28px;
315
+ font-weight: 700;
316
+ margin-bottom: 12px;
317
+ letter-spacing: -0.02em;
318
+ }
319
+
320
+ .welcome-screen p {
321
+ color: var(--text-secondary);
322
+ font-size: 15px;
323
+ max-width: 500px;
324
+ line-height: 1.6;
325
+ margin-bottom: 32px;
326
+ }
327
+
328
+ .welcome-suggestions {
329
+ display: flex;
330
+ flex-wrap: wrap;
331
+ gap: 10px;
332
+ justify-content: center;
333
+ }
334
+
335
+ .suggestion-chip {
336
+ background: var(--bg-glass);
337
+ border: 1px solid var(--border-color);
338
+ color: var(--text-secondary);
339
+ padding: 10px 18px;
340
+ border-radius: 100px;
341
+ font-size: 13px;
342
+ cursor: pointer;
343
+ transition: all var(--transition-fast);
344
+ font-family: var(--font-family);
345
+ }
346
+
347
+ .suggestion-chip:hover {
348
+ background: var(--bg-glass-hover);
349
+ color: var(--text-primary);
350
+ border-color: var(--border-accent);
351
+ box-shadow: var(--shadow-glow);
352
+ }
353
+
354
+ /* --- Message Bubbles --- */
355
+ .message {
356
+ display: flex;
357
+ gap: 12px;
358
+ margin-bottom: 20px;
359
+ animation: fadeInUp 0.35s ease-out;
360
+ }
361
+
362
+ .message.user-message {
363
+ justify-content: flex-end;
364
+ }
365
+
366
+ .message.assistant-message {
367
+ justify-content: flex-start;
368
+ }
369
+
370
+ .message-avatar {
371
+ width: 36px;
372
+ height: 36px;
373
+ border-radius: 50%;
374
+ display: flex;
375
+ align-items: center;
376
+ justify-content: center;
377
+ font-size: 14px;
378
+ font-weight: 700;
379
+ flex-shrink: 0;
380
+ margin-top: 2px;
381
+ }
382
+
383
+ .assistant-message .message-avatar {
384
+ background: var(--accent-gradient);
385
+ color: white;
386
+ box-shadow: var(--shadow-glow);
387
+ }
388
+
389
+ .user-message .message-avatar {
390
+ background: var(--bg-glass);
391
+ border: 1px solid var(--border-color);
392
+ color: var(--text-secondary);
393
+ order: 2;
394
+ }
395
+
396
+ .message-bubble {
397
+ max-width: 75%;
398
+ padding: 14px 18px;
399
+ border-radius: var(--radius-lg);
400
+ line-height: 1.65;
401
+ font-size: 14px;
402
+ }
403
+
404
+ .user-message .message-bubble {
405
+ background: var(--accent-primary);
406
+ color: white;
407
+ border-bottom-right-radius: 4px;
408
+ }
409
+
410
+ .assistant-message .message-bubble {
411
+ background: var(--bg-glass);
412
+ border: 1px solid var(--border-color);
413
+ backdrop-filter: blur(8px);
414
+ -webkit-backdrop-filter: blur(8px);
415
+ border-bottom-left-radius: 4px;
416
+ }
417
+
418
+ /* Markdown inside assistant bubbles */
419
+ .message-bubble h1,
420
+ .message-bubble h2,
421
+ .message-bubble h3 {
422
+ font-size: 15px;
423
+ font-weight: 600;
424
+ margin: 12px 0 6px;
425
+ color: var(--text-primary);
426
+ }
427
+
428
+ .message-bubble h1:first-child,
429
+ .message-bubble h2:first-child,
430
+ .message-bubble h3:first-child {
431
+ margin-top: 0;
432
+ }
433
+
434
+ .message-bubble p {
435
+ margin: 10px 0;
436
+ }
437
+
438
+ .message-bubble ul,
439
+ .message-bubble ol {
440
+ margin: 6px 0;
441
+ padding-left: 20px;
442
+ }
443
+
444
+ .message-bubble li {
445
+ margin: 3px 0;
446
+ }
447
+
448
+ /* Question highlighting */
449
+ .message-bubble .question-highlight {
450
+ background: rgba(99, 102, 241, 0.08);
451
+ border: 1px solid rgba(99, 102, 241, 0.25);
452
+ border-left: 3px solid var(--accent-primary);
453
+ border-radius: var(--radius-sm);
454
+ padding: 10px 14px;
455
+ margin: 10px 0;
456
+ display: flex;
457
+ align-items: flex-start;
458
+ gap: 10px;
459
+ }
460
+
461
+ .message-bubble .question-highlight p {
462
+ margin: 0;
463
+ color: var(--text-primary);
464
+ font-weight: 500;
465
+ }
466
+
467
+ .message-bubble .question-icon {
468
+ font-size: 16px;
469
+ flex-shrink: 0;
470
+ margin-top: 1px;
471
+ }
472
+
473
+ .message-bubble li.question-item {
474
+ background: rgba(99, 102, 241, 0.06);
475
+ border-left: 2px solid var(--accent-primary);
476
+ padding: 6px 10px;
477
+ border-radius: 0 var(--radius-sm) var(--radius-sm) 0;
478
+ margin: 6px 0;
479
+ list-style: none;
480
+ font-weight: 500;
481
+ }
482
+
483
+ .message-bubble strong {
484
+ color: var(--accent-secondary);
485
+ font-weight: 600;
486
+ }
487
+
488
+ .message-bubble code {
489
+ background: rgba(0, 0, 0, 0.3);
490
+ padding: 2px 6px;
491
+ border-radius: 4px;
492
+ font-size: 13px;
493
+ font-family: 'Fira Code', monospace;
494
+ }
495
+
496
+ .message-bubble pre {
497
+ background: rgba(0, 0, 0, 0.3);
498
+ padding: 12px;
499
+ border-radius: var(--radius-sm);
500
+ overflow-x: auto;
501
+ margin: 8px 0;
502
+ }
503
+
504
+ .message-bubble pre code {
505
+ background: none;
506
+ padding: 0;
507
+ }
508
+
509
+ .message-bubble blockquote {
510
+ border-left: 3px solid var(--accent-primary);
511
+ padding-left: 12px;
512
+ color: var(--text-secondary);
513
+ margin: 8px 0;
514
+ }
515
+
516
+ /* --- Typing Indicator --- */
517
+ .typing-indicator {
518
+ display: flex;
519
+ gap: 12px;
520
+ margin-bottom: 20px;
521
+ animation: fadeInUp 0.3s ease-out;
522
+ }
523
+
524
+ .typing-dots {
525
+ display: flex;
526
+ gap: 5px;
527
+ align-items: center;
528
+ padding: 14px 18px;
529
+ background: var(--bg-glass);
530
+ border: 1px solid var(--border-color);
531
+ border-radius: var(--radius-lg);
532
+ border-bottom-left-radius: 4px;
533
+ backdrop-filter: blur(8px);
534
+ }
535
+
536
+ .typing-dots span {
537
+ width: 8px;
538
+ height: 8px;
539
+ background: var(--text-muted);
540
+ border-radius: 50%;
541
+ animation: typingBounce 1.4s infinite;
542
+ }
543
+
544
+ .typing-dots span:nth-child(2) {
545
+ animation-delay: 0.15s;
546
+ }
547
+
548
+ .typing-dots span:nth-child(3) {
549
+ animation-delay: 0.3s;
550
+ }
551
+
552
+ /* ═══════════════════════════════════════════════════
553
+ INPUT AREA
554
+ ═══════════════════════════════════════════════════ */
555
+ .input-area {
556
+ padding: 12px 20px 16px;
557
+ background: linear-gradient(180deg, transparent 0%, var(--bg-primary) 20%);
558
+ position: absolute;
559
+ bottom: 0;
560
+ left: 0;
561
+ right: 0;
562
+ }
563
+
564
+ .input-wrapper {
565
+ max-width: 820px;
566
+ margin: 0 auto;
567
+ display: flex;
568
+ align-items: flex-end;
569
+ gap: 10px;
570
+ background: var(--bg-tertiary);
571
+ border: 1px solid var(--border-color);
572
+ border-radius: var(--radius-lg);
573
+ padding: 8px 8px 8px 18px;
574
+ transition: border-color var(--transition-fast), box-shadow var(--transition-fast);
575
+ }
576
+
577
+ .input-wrapper:focus-within {
578
+ border-color: var(--accent-primary);
579
+ box-shadow: 0 0 0 3px var(--accent-glow);
580
+ }
581
+
582
+ .input-wrapper textarea {
583
+ flex: 1;
584
+ background: none;
585
+ border: none;
586
+ color: var(--text-primary);
587
+ font-family: var(--font-family);
588
+ font-size: 14px;
589
+ line-height: 1.5;
590
+ resize: none;
591
+ outline: none;
592
+ max-height: 150px;
593
+ padding: 6px 0;
594
+ }
595
+
596
+ .input-wrapper textarea::placeholder {
597
+ color: var(--text-muted);
598
+ }
599
+
600
+ .btn-send {
601
+ background: var(--accent-primary);
602
+ border: none;
603
+ color: white;
604
+ border-radius: var(--radius-sm);
605
+ padding: 10px;
606
+ cursor: pointer;
607
+ display: flex;
608
+ align-items: center;
609
+ justify-content: center;
610
+ transition: all var(--transition-fast);
611
+ flex-shrink: 0;
612
+ }
613
+
614
+ .btn-send:hover:not(:disabled) {
615
+ background: var(--accent-secondary);
616
+ box-shadow: var(--shadow-glow);
617
+ }
618
+
619
+ .btn-send:disabled {
620
+ opacity: 0.3;
621
+ cursor: default;
622
+ }
623
+
624
+ .input-hint {
625
+ text-align: center;
626
+ font-size: 11px;
627
+ color: var(--text-muted);
628
+ margin-top: 6px;
629
+ }
630
+
631
+ /* ═══════════════════════════════════════════════════
632
+ ANIMATIONS
633
+ ═══════════════════════════════════════════════════ */
634
+ @keyframes fadeInUp {
635
+ from {
636
+ opacity: 0;
637
+ transform: translateY(12px);
638
+ }
639
+
640
+ to {
641
+ opacity: 1;
642
+ transform: translateY(0);
643
+ }
644
+ }
645
+
646
+ @keyframes typingBounce {
647
+
648
+ 0%,
649
+ 60%,
650
+ 100% {
651
+ transform: translateY(0);
652
+ opacity: 0.4;
653
+ }
654
+
655
+ 30% {
656
+ transform: translateY(-6px);
657
+ opacity: 1;
658
+ }
659
+ }
660
+
661
+ /* ═══════════════════════════════════════════════════
662
+ RESPONSIVE
663
+ ═══════════════════════════════════════════════════ */
664
+ @media (max-width: 768px) {
665
+ .sidebar {
666
+ position: absolute;
667
+ height: 100%;
668
+ z-index: 200;
669
+ box-shadow: var(--shadow-lg);
670
+ }
671
+
672
+ .sidebar.hidden {
673
+ transform: translateX(-100%);
674
+ }
675
+
676
+ .confidence-bar-container {
677
+ display: none;
678
+ }
679
+
680
+ .message-bubble {
681
+ max-width: 88%;
682
+ }
683
+
684
+ .welcome-screen {
685
+ padding: 40px 16px 20px;
686
+ }
687
+
688
+ .welcome-screen h1 {
689
+ font-size: 22px;
690
+ }
691
+ }