Ishwar Balappanawar committed
Commit 98d1657 · 0 Parent(s)

Initial upload of CUEBench dataset

Files changed (4)
  1. README.md +17 -0
  2. cuebench.py +35 -0
  3. metadata.jsonl +0 -0
  4. metric.py +153 -0
README.md ADDED
@@ -0,0 +1,17 @@
+ # CUEBench: Contextual Unobserved Entity Benchmark
+
+ CUEBench is a neurosymbolic benchmark that emphasizes **contextual entity prediction** in autonomous driving scenes. Unlike traditional detection tasks, CUEBench focuses on reasoning over **unobserved entities**: objects that may be occluded, out of frame, or affected by sensor failures.
+
+ ## Task
+
+ **Input**: a scene ID and the set of `observed_classes` detected in the scene.
+ **Output**: the `target_classes` that were present in the scene but unobserved.
+
+ ### Example
+ ```json
+ {
+   "image_id": "00003.00019",
+   "observed_classes": ["Car", "Bus", "Pedestrian"],
+   "target_classes": ["PickupTruck"]
+ }
+ ```
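
With the loading script below (`cuebench.py`), the dataset can be loaded through the `datasets` library. A minimal sketch, assuming a `datasets` version that still supports script-based loaders and that the script sits next to `metadata.jsonl` in this repo:

```python
from datasets import load_dataset

# Point the bundled loading script at the metadata file.
dataset = load_dataset("cuebench.py", data_files={"train": "metadata.jsonl"})

example = dataset["train"][0]
print(example["observed_classes"], "->", example["target_classes"])
```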
cuebench.py ADDED
@@ -0,0 +1,35 @@
+ import json
+
+ from datasets import DatasetInfo, Features, GeneratorBasedBuilder, Sequence, Split, SplitGenerator, Value
+
+
+ class CUEBench(GeneratorBasedBuilder):
+     def _info(self):
+         return DatasetInfo(
+             description="CUEBench: Contextual Entity Prediction for Occluded or Unobserved Entities in Autonomous Driving.",
+             features=Features({
+                 "image_id": Value("string"),
+                 "observed_classes": Sequence(Value("string")),  # class names detected in the scene
+                 "target_classes": Sequence(Value("string")),    # class names present but unobserved
+                 "image_path": Value("string"),
+             }),
+             citation="",
+             homepage="",
+         )
+
+     def _split_generators(self, dl_manager):
+         data_files = self.config.data_files
+         filepath = dl_manager.download_and_extract(
+             data_files["train"] if isinstance(data_files, dict) else data_files
+         )
+         return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath": filepath})]
+
+     def _generate_examples(self, filepath):
+         # download_and_extract returns a list when data_files is given as a list.
+         if isinstance(filepath, list):
+             filepath = filepath[0]
+         with open(filepath, "r", encoding="utf-8") as f:
+             for idx, line in enumerate(f):
+                 example = json.loads(line)
+                 yield idx, {
+                     "image_id": example["aligned_id"],
+                     "image_path": example["image_path"],
+                     "observed_classes": example["detected_classes"],
+                     "target_classes": example["target_classes"],
+                 }
metadata.jsonl ADDED
The diff for this file is too large to render. See raw diff
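
Although the raw diff is not rendered, `_generate_examples` above reads four keys from each line of `metadata.jsonl`. A single record presumably looks like the following; the values are illustrative, not taken from the actual file, and the `image_path` in particular is a made-up placeholder:

```json
{"aligned_id": "00003.00019", "image_path": "images/00003.00019.jpg", "detected_classes": ["Car", "Bus", "Pedestrian"], "target_classes": ["PickupTruck"]}
```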
 
metric.py ADDED
@@ -0,0 +1,153 @@
+ from datasets import Features, Metric, MetricInfo, Sequence, Value
+
+
+ class CUEBenchMetric(Metric):
+     def _info(self):
+         # Metric._info() must return a MetricInfo object (a plain dict breaks
+         # instantiation). Note the metric reports MRR, Hits@K, and Coverage@K.
+         return MetricInfo(
+             description="MRR, Hits@K, and Coverage@K for multi-label set prediction in CUEBench",
+             citation="",
+             inputs_description="List of predicted and reference class sets",
+             features=Features({
+                 "predictions": Sequence(Value("string")),
+                 "references": Sequence(Value("string")),
+             }),
+         )
+
+     def _MeanReciprocalRank(self, predicted, target):
+         # Reciprocal of the 1-based rank of the first prediction found in the
+         # target set (case-insensitive); 0 if there is no hit.
+         if not predicted or not target:
+             return 0
+         predicted = [str(p).lower() for p in predicted]
+         target = [str(t).lower() for t in target]
+         for i, p in enumerate(predicted):
+             if p in target:
+                 return 1 / (i + 1)
+         return 0
+
+     def _Hits_at_K(self, predicted, target, k):
+         # Number of top-k predictions that appear in the target set (case-insensitive).
+         if not predicted or not target:
+             return 0
+         predicted = [str(p).lower() for p in predicted]
+         target = [str(t).lower() for t in target]
+         return sum(1 for p in predicted[:k] if p in target)
+
+     def _coverage(self, _pd_Res, _eGold, _scores=None):
+         """
+         Evaluate predictions (_pd_Res) against gold labels (_eGold).
+         Optionally pass _scores (same length as _pd_Res) to keep the scores of
+         the returned top predictions.
+
+         Returns:
+             res: dict mapping k in {1, 3, 5, 10} to coverage@k, defined as
+                 |top-k predictions ∩ gold| / k
+             l_gold_pred: (_eGold, (top_k_labels, top_scores)) for the largest
+                 evaluated k with k >= len(_eGold), or () if there is no overlap
+
+         Unlike _MeanReciprocalRank and _Hits_at_K, matching here is case-sensitive.
+         """
+         res = {}
+         l_gold_pred = ()
+
+         # No predictions, no gold labels, or no overlap: coverage is 0 at every k.
+         if not _pd_Res or not _eGold or not (set(_eGold) & set(_pd_Res)):
+             for k in [1, 3, 5, 10]:
+                 res[k] = 0
+             return res, l_gold_pred
+
+         all_labels = _pd_Res
+
+         # 1-based rank of the first correct prediction (computed but not returned).
+         rank_first_gold = min(r + 1 for r, l in enumerate(all_labels) if l in _eGold)
+
+         for k in [1, 3, 5, 10]:
+             top_k_labels = all_labels[:k]
+             overlap = set(top_k_labels) & set(_eGold)
+             res[k] = len(overlap) / k
+
+             if k >= len(_eGold):
+                 top_scores = _scores[:k] if _scores else None
+                 l_gold_pred = (_eGold, (top_k_labels, top_scores))
+
+         return res, l_gold_pred
+
+     def _clean(self, strings):
+         # Normalize raw model outputs: strip markdown asterisks, matching quotes,
+         # square brackets, "label:"-style prefixes, and stray punctuation.
+         cleaned = []
+         for s in strings:
+             # Remove all asterisks and surrounding whitespace first
+             s = s.replace('*', '').strip()
+
+             # Remove surrounding quotes if they match (both single or both double)
+             if (s.startswith("'") and s.endswith("'")) or (s.startswith('"') and s.endswith('"')):
+                 s = s[1:-1]
+
+             # Remove square brackets
+             s = s.replace('[', '').replace(']', '')
+
+             # If a colon is present, keep only the part after the last one
+             if ':' in s:
+                 s = s.split(':')[-1]
+
+             # Final cleanup: strip remaining spaces, underscores, backslashes, and quotes
+             s = s.strip(' _\\"\'')
+
+             cleaned.append(s)
+         return cleaned
+
+     def _compute(self, outputs):
+         # `outputs` is a list of dicts with keys "predicted_classes" and
+         # "target_classes"; predictions are normalized in place.
+         for output in outputs:
+             output['predicted_classes'] = self._clean(output['predicted_classes'])
+
+         n = len(outputs)
+         ks = [1, 3, 5, 10]
+
+         average_mrr = sum(
+             self._MeanReciprocalRank(o['predicted_classes'], o['target_classes'])
+             for o in outputs
+         ) / n
+
+         # Fraction of examples with at least one correct prediction in the top k.
+         hits = {k: 0 for k in ks}
+         for o in outputs:
+             for k in ks:
+                 if self._Hits_at_K(o['predicted_classes'], o['target_classes'], k) > 0:
+                     hits[k] += 1
+
+         # Mean coverage@k over all examples.
+         cov = {k: 0 for k in ks}
+         for o in outputs:
+             res, _ = self._coverage(o['predicted_classes'], o['target_classes'])
+             for k in ks:
+                 cov[k] += res[k]
+
+         return {
+             "average_mrr": average_mrr,
+             "hits_at_1": hits[1] / n,
+             "hits_at_3": hits[3] / n,
+             "hits_at_5": hits[5] / n,
+             "hits_at_10": hits[10] / n,
+             "coverage_at_1": cov[1] / n,
+             "coverage_at_3": cov[3] / n,
+             "coverage_at_5": cov[5] / n,
+             "coverage_at_10": cov[10] / n,
+         }
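
A quick sanity check of the metric on a single toy example. This sketch assumes a `datasets` version that still ships the deprecated `Metric` API and that `metric.py` is importable from the working directory; the expected values in the comments follow directly from the definitions above:

```python
from metric import CUEBenchMetric

metric = CUEBenchMetric()

outputs = [{
    "predicted_classes": ["**Truck**", "PickupTruck", "Van"],  # "**Truck**" is cleaned to "Truck"
    "target_classes": ["PickupTruck"],
}]

scores = metric._compute(outputs)
# The single gold label sits at rank 2, so:
#   average_mrr = 1/2 = 0.5
#   hits_at_1 = 0.0; hits_at_3 = hits_at_5 = hits_at_10 = 1.0
#   coverage_at_1 = 0.0; coverage_at_3 = 1/3; coverage_at_5 = 1/5; coverage_at_10 = 1/10
print(scores)
```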