File size: 5,365 Bytes
73633b5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
# ─────────────────────────────────────────────
#  src/evaluator.py
#  Checks user answers and computes scores.
#  Simple but important β€” this is what makes
#  the project interactive and demo-worthy.
# ─────────────────────────────────────────────

from src.mcq_builder import MCQ


# ─────────────────────────────────────────────
#  CHECK A SINGLE ANSWER
# ─────────────────────────────────────────────

def check_answer(mcq: MCQ, user_choice: int) -> bool:
    """
    Return True when the user's selected option index matches the MCQ's
    correct option index, False otherwise.

    Parameters:
        mcq         : the MCQ object being answered
        user_choice : index 0-3 that the user selected

    Returns: True if correct, False otherwise
    """
    correct = mcq.correct_index
    return correct == user_choice


# ─────────────────────────────────────────────
#  SCORE A FULL QUIZ
# ─────────────────────────────────────────────

def score_quiz(mcqs: list[MCQ], user_answers: list[int]) -> dict:
    """
    Score all questions and return a detailed results dict.

    Parameters:
        mcqs         : list of MCQ objects (the quiz)
        user_answers : list of int indices (user's selections, one per MCQ)

    Returns:
        {
          "score"      : 7,           ← number correct
          "total"      : 10,          ← total questions
          "percentage" : 70.0,
          "feedback"   : "...",       ← message tier chosen from percentage
          "results"    : [            ← per-question details, one per MCQ
            {
              "question"      : "In what year was ISRO founded?",
              "your_answer"   : "1975",
              "correct_answer": "1969",
              "is_correct"    : False,
              "explanation"   : "ISRO was founded in 1969 by Vikram Sarabhai.",
            },
            ...
          ]
        }

    Notes:
        If user_answers is shorter than mcqs, the missing entries are
        treated as unanswered ("No answer", scored wrong) instead of being
        silently dropped from the results (the previous zip() truncated).
        Extra entries in user_answers beyond len(mcqs) are ignored.
    """
    score   = 0
    results = []
    n_given = len(user_answers)

    for idx, mcq in enumerate(mcqs):
        # A missing answer becomes the sentinel -1 (never a valid option
        # index), so unanswered questions still appear in the results and
        # count against the score.
        user_choice = user_answers[idx] if idx < n_given else -1
        is_correct = check_answer(mcq, user_choice)
        if is_correct:
            score += 1

        # Guard the option lookup: an out-of-range index (including the
        # -1 sentinel) is reported as "No answer" rather than raising.
        if 0 <= user_choice < len(mcq.options):
            chosen_text = mcq.options[user_choice]
        else:
            chosen_text = "No answer"

        results.append({
            "question"       : mcq.question,
            "your_answer"    : chosen_text,
            "correct_answer" : mcq.correct_answer,
            "is_correct"     : is_correct,
            "explanation"    : mcq.explanation,
            "all_options"    : mcq.options,
            "correct_index"  : mcq.correct_index,
            "user_index"     : user_choice,
        })

    total = len(mcqs)
    # Avoid ZeroDivisionError on an empty quiz.
    percentage = round((score / total) * 100, 1) if total > 0 else 0.0

    # Provide a feedback message based on score
    if percentage >= 80:
        feedback = "Excellent! You have a strong understanding of this passage."
    elif percentage >= 60:
        feedback = "Good effort! Review the explanations for questions you missed."
    elif percentage >= 40:
        feedback = "Fair attempt. Try re-reading the passage and retaking the quiz."
    else:
        feedback = "Keep practising! The explanations below will help you understand."

    return {
        "score"      : score,
        "total"      : total,
        "percentage" : percentage,
        "feedback"   : feedback,
        "results"    : results,
    }


# ─────────────────────────────────────────────
#  QUICK TEST
#  python src/evaluator.py
# ─────────────────────────────────────────────

if __name__ == "__main__":
    # Build three MCQs by hand so the scorer can be exercised without
    # running the full generation pipeline.
    demo_quiz = [
        MCQ("What year was ISRO founded?",
            ["1969", "1975", "1947", "1985"], 0, "1969",
            "ISRO was founded in 1969 by Vikram Sarabhai."),
        MCQ("Who founded ISRO?",
            ["Kalam", "Vikram Sarabhai", "Nehru", "Dhawan"], 1, "Vikram Sarabhai",
            "ISRO was founded in 1969 by Vikram Sarabhai."),
        MCQ("What did Chandrayaan-1 discover?",
            ["Oxygen", "Iron", "Water molecules", "Helium"], 2, "Water molecules",
            "Chandrayaan-1 discovered water molecules on the Moon."),
    ]

    # Simulated selections: Q1 correct, Q2 wrong, Q3 correct.
    picks = [0, 0, 2]
    outcome = score_quiz(demo_quiz, picks)

    # Print the summary line, the feedback tier, then one detail section
    # per question (correct answer shown only for misses).
    print("=== QUIZ RESULTS ===")
    print(f"Score: {outcome['score']} / {outcome['total']} ({outcome['percentage']}%)")
    print(f"Feedback: {outcome['feedback']}\n")
    for num, detail in enumerate(outcome['results'], 1):
        verdict = "CORRECT" if detail['is_correct'] else "WRONG"
        print(f"Q{num} [{verdict}] {detail['question']}")
        print(f"     Your answer   : {detail['your_answer']}")
        if not detail['is_correct']:
            print(f"     Correct answer: {detail['correct_answer']}")
        print(f"     Explanation   : {detail['explanation'][:80]}...")
        print()