Benny-Tang committed on
Commit
30ace27
·
verified ·
1 Parent(s): 4fd9b16

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -65
app.py CHANGED
@@ -4,98 +4,66 @@ import os
4
  import gradio as gr
5
  from agents import AnalyzerAgent, CoachAgent, PredictiveAgent
6
 
7
- # Paths
8
  QUESTIONS_FILE = "questions.json"
9
  PREDICTIONS_CACHE = "predictions_cache.json"
10
 
11
- # Load questions (merged bank)
12
  if not os.path.exists(QUESTIONS_FILE):
13
- # create minimal placeholder
14
  with open(QUESTIONS_FILE, "w", encoding="utf-8") as f:
15
- json.dump([], f, indent=2, ensure_ascii=False)
16
 
17
  with open(QUESTIONS_FILE, "r", encoding="utf-8") as f:
18
  QUESTION_BANK = json.load(f)
19
 
20
- # Ensure predictions cache exists
21
  if not os.path.exists(PREDICTIONS_CACHE):
22
  with open(PREDICTIONS_CACHE, "w", encoding="utf-8") as f:
23
  json.dump({}, f)
24
 
25
- # Agents
26
  analyzer = AnalyzerAgent()
27
  coach_agent = CoachAgent()
28
  predictor = PredictiveAgent(cache_path=PREDICTIONS_CACHE)
29
 
30
  def ensure_predictions_injected(level, subject, n=8):
31
- """
32
- Ensure predicted questions for level+subject are generated, cached and injected into QUESTION_BANK.
33
- Predictions are stored in predictions_cache.json and also appended to in-memory QUESTION_BANK
34
- with a 'source': 'predicted' flag and high id space (>=900000).
35
- """
36
  key = f"{level}_{subject}"
37
- # Load cache
38
  with open(predictor.cache_path, "r", encoding="utf-8") as f:
39
  cache = json.load(f)
40
 
41
  if key in cache and cache[key].get("injected"):
42
- # already injected into QUESTION_BANK in this process run
43
  return
44
 
45
- # ensure we have predictions (generate or fetch)
46
  preds = predictor.get_or_generate_predictions(level, subject, QUESTION_BANK, n=n)
47
 
48
- # assign unique high IDs and append to in-memory bank and cache
49
- next_pred_id = 900000
50
  existing_ids = {q["id"] for q in QUESTION_BANK}
51
- while next_pred_id in existing_ids:
52
- next_pred_id += 1
53
 
54
- appended = []
55
  for p in preds:
56
- # create full question structure expected by app
57
- q = {
58
- "id": next_pred_id,
59
  "subject": f"{level}_{subject}",
60
  "question_type": p.get("question_type", "mcq"),
61
  "text": p.get("text"),
62
  "choices": p.get("choices", []),
63
  "correct_answer": p.get("predicted_answer", ""),
64
- "topics": p.get("topics", []),
65
  "difficulty": p.get("difficulty", 3),
66
- "source": "predicted" # invisible flag
67
- }
68
- QUESTION_BANK.append(q)
69
- appended.append(q)
70
- next_pred_id += 1
71
 
72
- # update cache to mark injected (and save predicted objects)
73
  cache[key] = {"predictions": preds, "injected": True}
74
  with open(predictor.cache_path, "w", encoding="utf-8") as f:
75
  json.dump(cache, f, indent=2, ensure_ascii=False)
76
 
77
- # Optionally persist updated QUESTION_BANK (not required; merge script is authoritative)
78
- # with open(QUESTIONS_FILE, "w", encoding="utf-8") as f:
79
- # json.dump(QUESTION_BANK, f, indent=2, ensure_ascii=False)
80
-
81
- return appended
82
-
83
  def start_exam(level, subject, num_questions=10, include_predicted=True):
84
- """
85
- Start a randomized exam. If include_predicted is True, predictions will be ensured and mixed
86
- invisibly into the pool before sampling.
87
- """
88
  if include_predicted:
89
- # ensure predictions are in in-memory bank
90
  ensure_predictions_injected(level, subject, n=8)
91
 
92
- # Pool all questions for that level + subject (subject uses format Level_Subject, e.g., Form5_Math)
93
  pool = [q for q in QUESTION_BANK if q.get("subject") == f"{level}_{subject}"]
94
 
95
  if not pool:
96
- return [], gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
97
 
98
- # Shuffle and pick
99
  random.shuffle(pool)
100
  selected = pool[:min(num_questions, len(pool))]
101
 
@@ -103,32 +71,28 @@ def start_exam(level, subject, num_questions=10, include_predicted=True):
103
  {"id": q["id"], "text": q["text"], "choices": q.get("choices", []), "topics": q.get("topics", [])}
104
  for q in selected
105
  ]
106
- return exam_data, gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
107
 
108
  def submit_exam(answers, exam_data, level, subject):
109
  if not exam_data:
110
- return "No questions found for selected level/subject.", {}, {}, {}, gr.update(visible=False), gr.update(visible=True)
111
 
112
  correct = 0
113
  per_question = {}
114
  for q in exam_data:
115
  qid = str(q["id"])
116
- user_ans = answers.get(qid, None)
117
  orig = next((item for item in QUESTION_BANK if item["id"] == q["id"]), None)
118
  correct_ans = orig.get("correct_answer") if orig else None
119
  per_question[qid] = {"user": user_ans, "correct": correct_ans, "topics": q.get("topics", [])}
120
- if user_ans is not None and correct_ans is not None and str(user_ans).strip() == str(correct_ans).strip():
121
  correct += 1
122
 
123
  score = round(100 * correct / len(exam_data), 2)
124
 
125
- # Analyzer (via GLM)
126
  analysis = analyzer.analyze(per_question)
127
-
128
- # Coach (via GLM)
129
  coach = coach_agent.coach(analysis, level, subject)
130
 
131
- # Predictor summary for UI (predictions are cached & may have been injected)
132
  with open(predictor.cache_path, "r", encoding="utf-8") as f:
133
  cache = json.load(f)
134
  pred_key = f"{level}_{subject}"
@@ -145,22 +109,27 @@ def submit_exam(answers, exam_data, level, subject):
145
 
146
  # Gradio UI
147
  with gr.Blocks() as demo:
148
- gr.Markdown("## 📘 SPM Exam Simulator with Invisible Predictions")
149
 
150
  with gr.Row():
151
- level = gr.Dropdown(["Form5"], value="Form5", label="Select Level (SPM = Form5)")
152
- subject = gr.Dropdown(["Math", "Science", "Physics", "Chemistry", "Biology"], value="Math", label="Select Subject")
153
- num_q = gr.Slider(minimum=5, maximum=50, step=5, value=10, label="Number of Questions")
154
- include_pred = gr.Checkbox(value=True, label="Include AI-predicted questions (hidden to student)")
155
- start_btn = gr.Button("Start Randomized Practice (mixed pool)")
 
 
 
 
 
156
 
157
  exam_output = gr.State()
158
 
159
  exam_area = gr.Column(visible=False)
160
  with exam_area:
161
- gr.Markdown("### Exam Questions (edit your answers as JSON: {id: 'choice or text'})")
162
- exam_display = gr.JSON(label="Questions")
163
- answers_box = gr.JSON(label="Your Answers (e.g. {\"900001\":\"A\",\"1002\":\"2/3\"})")
164
  submit_btn = gr.Button("Submit Exam")
165
 
166
  results_area = gr.Column(visible=False)
@@ -170,17 +139,15 @@ with gr.Blocks() as demo:
170
  analysis_json = gr.JSON()
171
  with gr.Tab("Study Coach"):
172
  coach_json = gr.JSON()
173
- with gr.Tab("SPM Predictions (admin)"):
174
  predictions_json = gr.JSON()
175
 
176
- # start exam -> show exam_area
177
  start_btn.click(
178
  start_exam,
179
  inputs=[level, subject, num_q, include_pred],
180
- outputs=[exam_display, exam_area, results_area, score_text]
181
- ).then(lambda x: x, outputs=exam_output)
182
 
183
- # submit exam -> produce results & predictions
184
  submit_btn.click(
185
  submit_exam,
186
  inputs=[answers_box, exam_output, level, subject],
@@ -193,3 +160,4 @@ if __name__ == "__main__":
193
 
194
 
195
 
 
 
4
  import gradio as gr
5
  from agents import AnalyzerAgent, CoachAgent, PredictiveAgent
6
 
 
7
# Data files: the merged question bank and the cache of AI-predicted questions.
QUESTIONS_FILE = "questions.json"
PREDICTIONS_CACHE = "predictions_cache.json"

# Create a minimal placeholder question bank if none exists yet
# (a separate merge script is the authoritative producer of questions.json).
if not os.path.exists(QUESTIONS_FILE):
    with open(QUESTIONS_FILE, "w", encoding="utf-8") as f:
        # ensure_ascii=False for consistency with the other JSON writes in
        # this app (e.g. the predictions-cache save).
        json.dump([], f, indent=2, ensure_ascii=False)

with open(QUESTIONS_FILE, "r", encoding="utf-8") as f:
    QUESTION_BANK = json.load(f)

# Ensure the predictions cache exists so later reads never fail.
if not os.path.exists(PREDICTIONS_CACHE):
    with open(PREDICTIONS_CACHE, "w", encoding="utf-8") as f:
        json.dump({}, f)

# Agents: answer analysis, study coaching, and exam-question prediction.
analyzer = AnalyzerAgent()
coach_agent = CoachAgent()
predictor = PredictiveAgent(cache_path=PREDICTIONS_CACHE)
24
 
25
def ensure_predictions_injected(level, subject, n=8):
    """Generate (or load cached) predicted questions and inject them into
    the in-memory QUESTION_BANK.

    Predicted questions are assigned ids >= 900000 and carry a
    'source': 'predicted' flag so they can mix invisibly with real
    questions. The predictions cache records an 'injected' marker so this
    only runs once per (level, subject) pair in a process run.

    Args:
        level: exam level, e.g. "Form5".
        subject: subject name; combined with level as "Level_Subject".
        n: number of predictions to request from the predictor.
    """
    key = f"{level}_{subject}"

    with open(predictor.cache_path, "r", encoding="utf-8") as f:
        cache = json.load(f)

    # Already merged into QUESTION_BANK during this process run.
    if key in cache and cache[key].get("injected"):
        return

    preds = predictor.get_or_generate_predictions(level, subject, QUESTION_BANK, n=n)

    # Assign unique high ids (>= 900000). Re-check for collisions on EVERY
    # assignment — checking only before the loop would allow duplicates when
    # the bank already contains sparse ids in the 900000+ range.
    existing_ids = {q["id"] for q in QUESTION_BANK}
    next_id = 900000

    for p in preds:
        while next_id in existing_ids:
            next_id += 1
        QUESTION_BANK.append({
            "id": next_id,
            "subject": f"{level}_{subject}",
            "question_type": p.get("question_type", "mcq"),
            "text": p.get("text"),
            "choices": p.get("choices", []),
            "correct_answer": p.get("predicted_answer", ""),
            "topics": [p.get("topic", "general")],
            "difficulty": p.get("difficulty", 3),
            "source": "predicted"  # invisible flag; hidden from students
        })
        existing_ids.add(next_id)  # keep the collision set current
        next_id += 1

    # Persist the raw predictions and mark them as injected.
    cache[key] = {"predictions": preds, "injected": True}
    with open(predictor.cache_path, "w", encoding="utf-8") as f:
        json.dump(cache, f, indent=2, ensure_ascii=False)
57
 
 
 
 
 
 
 
58
  def start_exam(level, subject, num_questions=10, include_predicted=True):
 
 
 
 
59
  if include_predicted:
 
60
  ensure_predictions_injected(level, subject, n=8)
61
 
 
62
  pool = [q for q in QUESTION_BANK if q.get("subject") == f"{level}_{subject}"]
63
 
64
  if not pool:
65
+ return [], gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), []
66
 
 
67
  random.shuffle(pool)
68
  selected = pool[:min(num_questions, len(pool))]
69
 
 
71
  {"id": q["id"], "text": q["text"], "choices": q.get("choices", []), "topics": q.get("topics", [])}
72
  for q in selected
73
  ]
74
+ return exam_data, gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), exam_data
75
 
76
  def submit_exam(answers, exam_data, level, subject):
77
  if not exam_data:
78
+ return "No questions found.", {}, {}, {}, gr.update(visible=False), gr.update(visible=True)
79
 
80
  correct = 0
81
  per_question = {}
82
  for q in exam_data:
83
  qid = str(q["id"])
84
+ user_ans = answers.get(qid)
85
  orig = next((item for item in QUESTION_BANK if item["id"] == q["id"]), None)
86
  correct_ans = orig.get("correct_answer") if orig else None
87
  per_question[qid] = {"user": user_ans, "correct": correct_ans, "topics": q.get("topics", [])}
88
+ if user_ans and correct_ans and str(user_ans).strip() == str(correct_ans).strip():
89
  correct += 1
90
 
91
  score = round(100 * correct / len(exam_data), 2)
92
 
 
93
  analysis = analyzer.analyze(per_question)
 
 
94
  coach = coach_agent.coach(analysis, level, subject)
95
 
 
96
  with open(predictor.cache_path, "r", encoding="utf-8") as f:
97
  cache = json.load(f)
98
  pred_key = f"{level}_{subject}"
 
109
 
110
  # Gradio UI
111
  with gr.Blocks() as demo:
112
+ gr.Markdown("## 📘 SPM Exam Simulator (2018–2024) with AI Predictions")
113
 
114
  with gr.Row():
115
+ level = gr.Dropdown(["Form5"], value="Form5", label="Level (SPM=Form5)")
116
+ subject = gr.Dropdown(
117
+ ["BM", "English", "Math", "History", "Science", "MoralStudies",
118
+ "Accounting", "Economics", "Business"],
119
+ value="Math",
120
+ label="Subject"
121
+ )
122
+ num_q = gr.Slider(5, 50, step=5, value=10, label="Number of Questions")
123
+ include_pred = gr.Checkbox(True, label="Include AI-predicted")
124
+ start_btn = gr.Button("Start Exam")
125
 
126
  exam_output = gr.State()
127
 
128
  exam_area = gr.Column(visible=False)
129
  with exam_area:
130
+ gr.Markdown("### Questions")
131
+ exam_display = gr.JSON(label="Exam")
132
+ answers_box = gr.JSON(label="Your Answers")
133
  submit_btn = gr.Button("Submit Exam")
134
 
135
  results_area = gr.Column(visible=False)
 
139
  analysis_json = gr.JSON()
140
  with gr.Tab("Study Coach"):
141
  coach_json = gr.JSON()
142
+ with gr.Tab("Predictions (Admin)"):
143
  predictions_json = gr.JSON()
144
 
 
145
  start_btn.click(
146
  start_exam,
147
  inputs=[level, subject, num_q, include_pred],
148
+ outputs=[exam_display, exam_area, results_area, score_text, exam_output]
149
+ )
150
 
 
151
  submit_btn.click(
152
  submit_exam,
153
  inputs=[answers_box, exam_output, level, subject],
 
160
 
161
 
162
 
163
+