| """Tests for evaluator.score_submission(). | |
| Run from the EgoMemReason-Space/ directory: | |
| python -m pytest tests/ -q | |
| """ | |
| import json | |
| import os | |
| import pathlib | |
| import sys | |
| import pytest | |
| ROOT = pathlib.Path(__file__).resolve().parents[1] | |
| sys.path.insert(0, str(ROOT)) | |
| import evaluator | |
| ANN = ROOT / "annotations_private.json" | |
| ORACLE = ROOT / "tests" / "fixtures" / "oracle_submission.json" | |
| ALL_A = ROOT / "tests" / "fixtures" / "all_a_submission.json" | |
| pytestmark = pytest.mark.skipif( | |
| not ANN.exists(), | |
| reason=f"{ANN} not present (copy from ../EgoMemReason-EvalAI.archived/)", | |
| ) | |
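
# Submission fixtures are JSON lists of per-example rows. Judging from the
# broken-submission test below, each row looks roughly like
#     {"example_id": <int>, "predicted_answer": "<letter>"}
# (field names inferred from that test, not confirmed against the evaluator).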


def test_oracle_scores_100():
    metrics = evaluator.score_submission(str(ORACLE), str(ANN))
    for k, v in metrics.items():
        assert v == 100.0, f"{k} should be 100.0, got {v}"


def test_all_a_scores_around_14():
    # All-A's exact score depends on the A-letter frequency in the dataset;
    # we measured 14.2% during the EvalAI port. Allow a wide band.
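    # A minimal sketch of how that baseline could be re-derived, assuming each
    # private annotation row stores its gold letter under an "answer" key
    # (hypothetical field name):
    #
    #     rows = json.loads(ANN.read_text())
    #     pct_a = 100.0 * sum(r["answer"] == "A" for r in rows) / len(rows)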
    metrics = evaluator.score_submission(str(ALL_A), str(ANN))
    assert 10.0 <= metrics["Overall"] <= 20.0, metrics


def test_broken_submission_raises(tmp_path):
    broken = tmp_path / "broken.json"
    # One row with a bogus letter, and every other example_id missing.
    # write_text flushes and closes the file, unlike json.dump into an
    # unclosed handle, so the evaluator is guaranteed to see the full payload.
    broken.write_text(
        json.dumps([{"example_id": 1, "predicted_answer": "ZZ"}])
    )
    with pytest.raises(ValueError) as exc:
        evaluator.score_submission(str(broken), str(ANN))
    assert "must be one of" in str(exc.value)
    assert "missing" in str(exc.value)