adeshboudh16 Claude Opus 4.6 committed on
Commit
b49a2dd
·
1 Parent(s): dbc9192

fix: interview turn limits, LLM fallback, and completion UI

Browse files

- Fix graph resume: use aupdate_state + ainvoke(None) so evaluate_answer
and routing actually execute after interrupt
- Switch LLM to Gemini primary with Groq fallback
- Randomize question order per interview session
- Show score inline on interview completion instead of broken report page
- Add hard turn-limit safety checks in backend and frontend
- Register asyncpg JSON codecs so feedback JSONB is parsed correctly
- Navbar logo links to role-appropriate dashboard
- Set max turns to 5

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

backend/db/connection.py CHANGED
@@ -1,15 +1,24 @@
 
1
  import os
 
2
  import asyncpg
3
 
4
  _pool: asyncpg.Pool | None = None
5
 
6
 
 
 
 
 
 
 
7
  async def init_db_pool() -> None:
8
  global _pool
9
  _pool = await asyncpg.create_pool(
10
  os.getenv("NEON_DB_URL"),
11
  min_size=1,
12
  max_size=10,
 
13
  )
14
 
15
 
 
1
+ import json
2
  import os
3
+
4
  import asyncpg
5
 
6
  _pool: asyncpg.Pool | None = None
7
 
8
 
9
+ async def _init_connection(conn: asyncpg.Connection) -> None:
10
+ """Register JSON/JSONB codecs so asyncpg auto-parses those columns."""
11
+ await conn.set_type_codec("json", encoder=json.dumps, decoder=json.loads, schema="pg_catalog")
12
+ await conn.set_type_codec("jsonb", encoder=json.dumps, decoder=json.loads, schema="pg_catalog")
13
+
14
+
15
  async def init_db_pool() -> None:
16
  global _pool
17
  _pool = await asyncpg.create_pool(
18
  os.getenv("NEON_DB_URL"),
19
  min_size=1,
20
  max_size=10,
21
+ init=_init_connection,
22
  )
23
 
24
 
backend/graph/nodes.py CHANGED
@@ -25,10 +25,13 @@ def _msg_role(msg) -> str:
25
  return "assistant"
26
 
27
 
 
 
 
28
  async def ask_question(state: InterviewState) -> dict:
29
  remaining = list(state["questions_remaining"])
30
- if not remaining:
31
- return {}
32
 
33
  question = remaining.pop(0)
34
  prompt = build_ask_question_prompt(
@@ -98,7 +101,6 @@ async def counter_question(state: InterviewState) -> dict:
98
  return {
99
  "messages": [{"role": "assistant", "content": response}],
100
  "awaiting_counter_response": True,
101
- "turn_count": state["turn_count"] + 1,
102
  "counter_questions_asked": state["counter_questions_asked"] + 1,
103
  }
104
 
@@ -156,16 +158,22 @@ def route_after_evaluation(state: InterviewState) -> str:
156
  questions_remaining = state["questions_remaining"]
157
  awaiting_counter = state["awaiting_counter_response"]
158
 
 
 
159
  # End conditions always win — no extra turns after the limit
160
- if turn_count >= 2 or not questions_remaining:
161
- return "end"
 
 
 
 
 
 
 
 
 
 
162
 
163
- # Shallow + not already in counter loop + under cap (max 2 per interview)
164
- if verdict == "shallow" and not awaiting_counter and state.get("counter_questions_asked", 0) < 2:
165
- return "counter"
166
 
167
- # Compress memory every 4 turns
168
- if turn_count % 4 == 0 and turn_count > 0:
169
- return "summarize"
170
 
171
- return "next_question"
 
25
  return "assistant"
26
 
27
 
28
+ MAX_TURNS = 5
29
+
30
+
31
  async def ask_question(state: InterviewState) -> dict:
32
  remaining = list(state["questions_remaining"])
33
+ if not remaining or state["turn_count"] >= MAX_TURNS:
34
+ return {"messages": []}
35
 
36
  question = remaining.pop(0)
37
  prompt = build_ask_question_prompt(
 
101
  return {
102
  "messages": [{"role": "assistant", "content": response}],
103
  "awaiting_counter_response": True,
 
104
  "counter_questions_asked": state["counter_questions_asked"] + 1,
105
  }
106
 
 
158
  questions_remaining = state["questions_remaining"]
159
  awaiting_counter = state["awaiting_counter_response"]
160
 
161
+ decision = "unknown"
162
+
163
  # End conditions always win — no extra turns after the limit
164
+ if turn_count >= MAX_TURNS or not questions_remaining:
165
+ decision = "end"
166
+ elif verdict == "shallow" and not awaiting_counter and state.get("counter_questions_asked", 0) < 2:
167
+ decision = "counter"
168
+ elif turn_count % 4 == 0 and turn_count > 0:
169
+ decision = "summarize"
170
+ else:
171
+ decision = "next_question"
172
+
173
+ print(f"[ROUTE] turn_count={turn_count}, verdict={verdict}, "
174
+ f"remaining={len(questions_remaining)}, awaiting_counter={awaiting_counter}, "
175
+ f"counter_asked={state.get('counter_questions_asked', 0)}, decision={decision}")
176
 
177
+ return decision
 
 
178
 
 
 
 
179
 
 
backend/llm.py CHANGED
@@ -2,25 +2,59 @@ import os
2
 
3
  import httpx
4
 
5
- _OPENROUTER_URL = "https://openrouter.ai/api/v1/chat/completions"
6
- _MODEL = "nvidia/nemotron-3-super-120b-a12b:free"
7
 
 
 
8
 
9
- async def call_llm(messages: list[dict], max_tokens: int = 512) -> str:
10
- """Call OpenRouter API. Returns the assistant message content string."""
11
- api_key = os.getenv("OPENROUTER_API_KEY", "")
12
  async with httpx.AsyncClient(timeout=60.0) as client:
13
  response = await client.post(
14
- _OPENROUTER_URL,
15
  headers={
16
  "Authorization": f"Bearer {api_key}",
17
  "Content-Type": "application/json",
18
  },
19
  json={
20
- "model": _MODEL,
21
  "messages": messages,
22
  "max_tokens": max_tokens,
23
  },
24
  )
25
  response.raise_for_status()
26
  return response.json()["choices"][0]["message"]["content"] or ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
  import httpx
4
 
5
+ _GEMINI_URL = "https://generativelanguage.googleapis.com/v1beta/chat/completions"
6
+ _GEMINI_MODEL = "gemini-3.1-flash-lite-preview"
7
 
8
+ _GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"
9
+ _GROQ_MODEL = "llama-3.3-70b-versatile"
10
 
11
+
12
+ async def _call_gemini(messages: list[dict], max_tokens: int) -> str:
13
+ api_key = os.getenv("GEMINI_API_KEY", "")
14
  async with httpx.AsyncClient(timeout=60.0) as client:
15
  response = await client.post(
16
+ _GEMINI_URL,
17
  headers={
18
  "Authorization": f"Bearer {api_key}",
19
  "Content-Type": "application/json",
20
  },
21
  json={
22
+ "model": _GEMINI_MODEL,
23
  "messages": messages,
24
  "max_tokens": max_tokens,
25
  },
26
  )
27
  response.raise_for_status()
28
  return response.json()["choices"][0]["message"]["content"] or ""
29
+
30
+
31
+ async def _call_groq(messages: list[dict], max_tokens: int) -> str:
32
+ api_key = os.getenv("GROQ_API_KEY", "")
33
+ async with httpx.AsyncClient(timeout=60.0) as client:
34
+ response = await client.post(
35
+ _GROQ_URL,
36
+ headers={
37
+ "Authorization": f"Bearer {api_key}",
38
+ "Content-Type": "application/json",
39
+ },
40
+ json={
41
+ "model": _GROQ_MODEL,
42
+ "messages": messages,
43
+ "max_tokens": max_tokens,
44
+ },
45
+ )
46
+ response.raise_for_status()
47
+ return response.json()["choices"][0]["message"]["content"] or ""
48
+
49
+
50
+ async def call_llm(messages: list[dict], max_tokens: int = 512) -> str:
51
+ """Call Gemini (primary) with Groq fallback. Returns assistant message content."""
52
+ try:
53
+ result = await _call_gemini(messages, max_tokens)
54
+ if result:
55
+ return result
56
+ print("[LLM] Gemini returned empty, falling back to Groq")
57
+ except Exception as e:
58
+ print(f"[LLM] Gemini failed: {e}, falling back to Groq")
59
+
60
+ return await _call_groq(messages, max_tokens)
backend/routers/interview.py CHANGED
@@ -1,4 +1,5 @@
1
  import json
 
2
 
3
  from fastapi import APIRouter, Depends, HTTPException, Request
4
  from pydantic import BaseModel
@@ -54,14 +55,17 @@ async def start_interview(
54
  session = await queries.create_interview_session(user["user_id"], body.topic_id)
55
  session_id = str(session["id"])
56
 
 
 
 
 
 
 
57
  initial_state = {
58
  "topic_name": topic["name"],
59
  "session_id": session_id,
60
  "student_id": user["user_id"],
61
- "questions_remaining": [
62
- {"question_text": q["question_text"], "difficulty": q["difficulty"]}
63
- for q in questions
64
- ],
65
  "past_best_score": past_best_score,
66
  "past_weak_areas": past_weak_areas,
67
  "messages": [],
@@ -84,6 +88,7 @@ async def start_interview(
84
  return {
85
  "session_id": session_id,
86
  "message": _last_ai_message(result["messages"]),
 
87
  }
88
 
89
 
@@ -102,10 +107,33 @@ async def interview_turn(
102
  graph = request.app.state.graph
103
  config = {"configurable": {"thread_id": body.session_id}}
104
 
105
- result = await graph.ainvoke(
106
- {"messages": [{"role": "human", "content": body.student_message}]},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
  config,
 
 
108
  )
 
109
 
110
  is_complete = result.get("status") == "complete"
111
  response: dict = {
 
1
  import json
2
+ import random
3
 
4
  from fastapi import APIRouter, Depends, HTTPException, Request
5
  from pydantic import BaseModel
 
55
  session = await queries.create_interview_session(user["user_id"], body.topic_id)
56
  session_id = str(session["id"])
57
 
58
+ shuffled_questions = [
59
+ {"question_text": q["question_text"], "difficulty": q["difficulty"]}
60
+ for q in questions
61
+ ]
62
+ random.shuffle(shuffled_questions)
63
+
64
  initial_state = {
65
  "topic_name": topic["name"],
66
  "session_id": session_id,
67
  "student_id": user["user_id"],
68
+ "questions_remaining": shuffled_questions,
 
 
 
69
  "past_best_score": past_best_score,
70
  "past_weak_areas": past_weak_areas,
71
  "messages": [],
 
88
  return {
89
  "session_id": session_id,
90
  "message": _last_ai_message(result["messages"]),
91
+ "turn_count": result.get("turn_count", 0),
92
  }
93
 
94
 
 
107
  graph = request.app.state.graph
108
  config = {"configurable": {"thread_id": body.session_id}}
109
 
110
+ # Hard safety check — refuse turns beyond the limit regardless of graph state
111
+ MAX_TURNS = 5
112
+ checkpoint = await graph.checkpointer.aget(config)
113
+ if checkpoint:
114
+ current_turns = checkpoint.get("channel_values", {}).get("turn_count", 0)
115
+ print(f"[TURN GUARD] session={body.session_id}, current_turns={current_turns}, max={MAX_TURNS}")
116
+ if current_turns > MAX_TURNS:
117
+ guard_feedback = {
118
+ "score": 0, "summary": "Interview ended — turn limit reached.",
119
+ "concept_score": 0, "depth_score": 0, "mistakes": [], "tips": [],
120
+ }
121
+ await queries.update_session_complete(body.session_id, 0, guard_feedback)
122
+ return {
123
+ "message": "Interview complete — turn limit reached.",
124
+ "turn_count": current_turns,
125
+ "is_complete": True,
126
+ "is_counter_q": False,
127
+ "feedback": guard_feedback,
128
+ }
129
+
130
+ # Update state with the student's message, then resume from the interrupt
131
+ await graph.aupdate_state(
132
  config,
133
+ {"messages": [{"role": "human", "content": body.student_message}]},
134
+ as_node="ask_question",
135
  )
136
+ result = await graph.ainvoke(None, config)
137
 
138
  is_complete = result.get("status") == "complete"
139
  response: dict = {
frontend/src/components/interview/ProgressBar.tsx CHANGED
@@ -10,7 +10,7 @@ export default function ProgressBar({ current, max }: Props) {
10
  <div className="px-4 py-2 border-b border-gray-800">
11
  <div className="flex items-center justify-between text-xs text-gray-400 mb-1">
12
  <span>Progress</span>
13
- <span>Turn {current} of {max}</span>
14
  </div>
15
  <div className="h-1.5 bg-gray-800 rounded-full overflow-hidden">
16
  <div
 
10
  <div className="px-4 py-2 border-b border-gray-800">
11
  <div className="flex items-center justify-between text-xs text-gray-400 mb-1">
12
  <span>Progress</span>
13
+ <span>Question {current} of {max}</span>
14
  </div>
15
  <div className="h-1.5 bg-gray-800 rounded-full overflow-hidden">
16
  <div
frontend/src/components/shared/Navbar.tsx CHANGED
@@ -14,7 +14,12 @@ export default function Navbar() {
14
 
15
  return (
16
  <nav className="bg-gray-900 border-b border-gray-800 px-6 py-3 flex items-center justify-between">
17
- <span className="text-white font-semibold text-lg tracking-tight">InterviewMentor</span>
 
 
 
 
 
18
  {user && (
19
  <div className="flex items-center gap-4">
20
  <span className="text-gray-400 text-sm">{user.full_name}</span>
 
14
 
15
  return (
16
  <nav className="bg-gray-900 border-b border-gray-800 px-6 py-3 flex items-center justify-between">
17
+ <button
18
+ onClick={() => navigate(user?.role === 'instructor' ? '/instructor/dashboard' : '/student/dashboard')}
19
+ className="text-white font-semibold text-lg tracking-tight hover:text-indigo-400 transition-colors"
20
+ >
21
+ InterviewMentor
22
+ </button>
23
  {user && (
24
  <div className="flex items-center gap-4">
25
  <span className="text-gray-400 text-sm">{user.full_name}</span>
frontend/src/pages/Interview.tsx CHANGED
@@ -16,6 +16,7 @@ export default function Interview() {
16
  useInterviewStore()
17
 
18
  const [error, setError] = useState('')
 
19
 
20
  // On mount: resume existing session or start new one
21
  useEffect(() => {
@@ -53,6 +54,7 @@ export default function Interview() {
53
  try {
54
  const data = await startInterview(topicId)
55
  startSession(data.session_id, topicId, data.message)
 
56
  // Update URL to include session id without triggering re-mount
57
  navigate(`/student/interview/${data.session_id}`, { replace: true, state: location.state })
58
  } catch {
@@ -63,6 +65,14 @@ export default function Interview() {
63
 
64
  async function handleSend(answer: string) {
65
  if (!useInterviewStore.getState().sessionId) return
 
 
 
 
 
 
 
 
66
  const currentSessionId = useInterviewStore.getState().sessionId!
67
 
68
  addMessage({ role: 'student', content: answer })
@@ -71,12 +81,15 @@ export default function Interview() {
71
 
72
  try {
73
  const data = await sendTurn(currentSessionId, answer)
74
- addMessage({ role: 'ai', content: data.message })
75
  if (data.turn_count != null) setTurnCount(data.turn_count)
76
 
77
  if (data.is_complete) {
 
 
 
78
  setStatus('finished')
79
  } else {
 
80
  setStatus('waiting')
81
  }
82
  } catch {
@@ -112,14 +125,37 @@ export default function Interview() {
112
  )}
113
 
114
  {isFinished ? (
115
- <div className="border-t border-gray-800 px-4 py-4 text-center">
116
- <p className="text-gray-400 text-sm mb-3">Interview complete!</p>
117
- <button
118
- onClick={() => navigate(`/student/report/${currentSessionId}`)}
119
- className="bg-indigo-600 hover:bg-indigo-500 text-white rounded-xl px-6 py-2 text-sm font-medium transition-colors"
120
- >
121
- View Report
122
- </button>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
123
  </div>
124
  ) : (
125
  status !== 'idle' && (
 
16
  useInterviewStore()
17
 
18
  const [error, setError] = useState('')
19
+ const [feedback, setFeedback] = useState<Record<string, unknown> | null>(null)
20
 
21
  // On mount: resume existing session or start new one
22
  useEffect(() => {
 
54
  try {
55
  const data = await startInterview(topicId)
56
  startSession(data.session_id, topicId, data.message)
57
+ if (data.turn_count != null) setTurnCount(data.turn_count)
58
  // Update URL to include session id without triggering re-mount
59
  navigate(`/student/interview/${data.session_id}`, { replace: true, state: location.state })
60
  } catch {
 
65
 
66
  async function handleSend(answer: string) {
67
  if (!useInterviewStore.getState().sessionId) return
68
+
69
+ // Frontend safety: don't send if already at max turns
70
+ const { turnCount: currentTurn, maxTurns: limit } = useInterviewStore.getState()
71
+ if (currentTurn > limit) {
72
+ setStatus('finished')
73
+ return
74
+ }
75
+
76
  const currentSessionId = useInterviewStore.getState().sessionId!
77
 
78
  addMessage({ role: 'student', content: answer })
 
81
 
82
  try {
83
  const data = await sendTurn(currentSessionId, answer)
 
84
  if (data.turn_count != null) setTurnCount(data.turn_count)
85
 
86
  if (data.is_complete) {
87
+ if (data.feedback) {
88
+ setFeedback(data.feedback)
89
+ }
90
  setStatus('finished')
91
  } else {
92
+ addMessage({ role: 'ai', content: data.message })
93
  setStatus('waiting')
94
  }
95
  } catch {
 
125
  )}
126
 
127
  {isFinished ? (
128
+ <div className="border-t border-gray-800 px-4 py-6 space-y-4">
129
+ <p className="text-center text-white font-semibold text-lg">Interview Complete!</p>
130
+
131
+ {feedback && (
132
+ <div className="bg-gray-900 border border-gray-800 rounded-xl p-5 space-y-3 mx-4">
133
+ <div className="flex items-center justify-between">
134
+ <span className="text-gray-400 text-sm">Score</span>
135
+ <span className={`text-2xl font-bold ${
136
+ Number(feedback.score) >= 70 ? 'text-green-400' :
137
+ Number(feedback.score) >= 40 ? 'text-yellow-400' : 'text-red-400'
138
+ }`}>
139
+ {feedback.score ?? 0}/100
140
+ </span>
141
+ </div>
142
+ {feedback.summary && (
143
+ <p className="text-gray-300 text-sm">{String(feedback.summary)}</p>
144
+ )}
145
+ </div>
146
+ )}
147
+
148
+ <div className="text-center">
149
+ <button
150
+ onClick={() => {
151
+ useInterviewStore.getState().reset()
152
+ navigate('/student/dashboard')
153
+ }}
154
+ className="bg-indigo-600 hover:bg-indigo-500 text-white rounded-xl px-6 py-2 text-sm font-medium transition-colors"
155
+ >
156
+ Back to Dashboard
157
+ </button>
158
+ </div>
159
  </div>
160
  ) : (
161
  status !== 'idle' && (
frontend/src/pages/Report.tsx CHANGED
@@ -42,7 +42,7 @@ export default function Report() {
42
  </div>
43
  )}
44
 
45
- {report && (
46
  <>
47
  {/* Header */}
48
  <div className="bg-gray-900 border border-gray-800 rounded-xl p-6 flex items-center justify-between">
 
42
  </div>
43
  )}
44
 
45
+ {report && report.feedback && (
46
  <>
47
  {/* Header */}
48
  <div className="bg-gray-900 border border-gray-800 rounded-xl p-6 flex items-center justify-between">
frontend/src/store/interviewStore.ts CHANGED
@@ -23,7 +23,7 @@ const initialState: InterviewState = {
23
  topicId: null,
24
  messages: [],
25
  turnCount: 0,
26
- maxTurns: 2,
27
  status: 'idle',
28
  }
29
 
 
23
  topicId: null,
24
  messages: [],
25
  turnCount: 0,
26
+ maxTurns: 5,
27
  status: 'idle',
28
  }
29