from datasets import Features, Metric, MetricInfo, Sequence, Value


class CUEBenchMetric(Metric):
    # Ranking metrics (MRR, hits@k, coverage@k) for multi-label set
    # prediction in CUEBench, built on the datasets.Metric API
    # (datasets < 3.0; the API has since moved to the `evaluate` library).

    def _info(self):
        # datasets.Metric requires a MetricInfo object; a plain dict breaks
        # Metric.__init__. The Features schema is indicative only, since
        # _compute below is called directly with a list of output dicts.
        return MetricInfo(
            description="MRR, Hits@K, and Coverage@K for multi-label set prediction in CUEBench",
            inputs_description="List of predicted and reference class sets",
            citation="",
            features=Features(
                {
                    "predictions": Sequence(Value("string")),
                    "references": Sequence(Value("string")),
                }
            ),
        )

    def _MeanReciprocalRank(self, predicted, target):
        # Reciprocal rank of the first predicted label that appears in the
        # target set (case-insensitive); 0 on a miss or empty input.
        if not predicted or not target:
            return 0
        predicted = [str(p).lower() for p in predicted]
        target = [str(t).lower() for t in target]
        for i, p in enumerate(predicted):
            if p in target:
                return 1 / (i + 1)
        return 0
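
    # A worked example with hypothetical labels: _MeanReciprocalRank(
    #     ["renal failure", "sepsis", "pneumonia"], ["pneumonia"])
    # returns 1/3, since the first gold label appears at rank 3.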

    def _Hits_at_K(self, predicted, target, k):
        # Number of top-k predictions that appear in the target set
        # (case-insensitive); 0 on empty input.
        if not predicted or not target:
            return 0
        predicted = [str(p).lower() for p in predicted]
        target = [str(t).lower() for t in target]
        return sum(1 for p in predicted[:k] if p in target)
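
    # A worked example with hypothetical labels: _Hits_at_K(
    #     ["a", "b", "c", "d"], ["b", "d"], 3) returns 1, since only "b"
    # falls in the top 3; _compute below binarizes this count into hits@k.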

    def _coverage(self, _pd_Res, _eGold, _scores=None):
        """
        Evaluate ranked predictions (_pd_Res) against gold labels (_eGold).
        Optionally pass _scores (same length as _pd_Res) to carry prediction
        scores through to the returned tuple.

        Returns:
            res: dict mapping k in {1, 3, 5, 10} to coverage@k, the fraction
                of the top-k predictions that are gold labels
            l_gold_pred: (_eGold, (top_k_labels, top_k_scores)), captured at
                the smallest k >= len(_eGold); an empty tuple if predictions
                and gold labels do not overlap
        """
        res = {}
        l_gold_pred = ()

        if not _pd_Res or not _eGold:
            for k in [1, 3, 5, 10]:
                res[k] = 0
            return res, l_gold_pred

        # Compare case-insensitively, matching _MeanReciprocalRank and
        # _Hits_at_K above.
        all_labels = [str(p).lower() for p in _pd_Res]
        gold = set(str(t).lower() for t in _eGold)

        if gold & set(all_labels):
            for k in [1, 3, 5, 10]:
                top_k_labels = all_labels[:k]
                overlap = set(top_k_labels) & gold
                res[k] = len(overlap) / k

                # Capture the gold/predicted pair once, at the smallest k
                # that can hold every gold label.
                if not l_gold_pred and k >= len(_eGold):
                    top_scores = _scores[:k] if _scores else None
                    l_gold_pred = (_eGold, (top_k_labels, top_scores))
        else:
            for k in [1, 3, 5, 10]:
                res[k] = 0

        return res, l_gold_pred
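
    # A worked example with hypothetical labels: for predictions
    # ["a", "b", "c", "d", "e"] and gold ["a", "c"], coverage is 1.0 at
    # k=1, 2/3 at k=3, 2/5 at k=5, and 2/10 at k=10 (the denominator is k
    # even when fewer predictions exist), and l_gold_pred is captured at
    # k=3, the smallest k that can hold both gold labels.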

    def _clean(self, strings):
        # Normalize raw model output strings into bare class labels.
        cleaned = []
        for s in strings:
            # Drop markdown emphasis and surrounding whitespace.
            s = s.replace('*', '').strip()

            # Strip one pair of matching outer quotes.
            if (s.startswith("'") and s.endswith("'")) or (s.startswith('"') and s.endswith('"')):
                s = s[1:-1]

            # Remove list brackets.
            s = s.replace('[', '').replace(']', '')

            # Keep only the text after a "label:" style prefix.
            if ':' in s:
                s = s.split(':')[-1]

            # Trim leftover spaces, underscores, backslashes, and quotes.
            s = s.strip(' _\\"\'')

            cleaned.append(s)
        return cleaned
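
    # A worked example with hypothetical raw outputs:
    # _clean(['**"Class: sepsis"**', "['pneumonia']"]) returns
    # ['sepsis', 'pneumonia'].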

    def _compute(self, outputs):
        # Each output dict carries ranked 'predicted_classes' and the gold
        # 'target_classes' for one example.
        for out in outputs:
            out['predicted_classes'] = self._clean(out['predicted_classes'])

        average_mrr = sum(
            self._MeanReciprocalRank(out['predicted_classes'], out['target_classes'])
            for out in outputs
        ) / len(outputs)

        # hits@k: fraction of examples with at least one gold label in the top k.
        hits = {k: 0 for k in [1, 3, 5, 10]}
        for out in outputs:
            for k in hits:
                if self._Hits_at_K(out['predicted_classes'], out['target_classes'], k) > 0:
                    hits[k] += 1
        hits = {k: v / len(outputs) for k, v in hits.items()}

        # coverage@k: mean fraction of the top-k slots occupied by gold labels.
        cov = {k: 0 for k in [1, 3, 5, 10]}
        for out in outputs:
            res, _ = self._coverage(out['predicted_classes'], out['target_classes'])
            for k in cov:
                cov[k] += res[k]
        cov = {k: v / len(outputs) for k, v in cov.items()}

        return {
            "average_mrr": average_mrr,
            "hits_at_1": hits[1],
            "hits_at_3": hits[3],
            "hits_at_5": hits[5],
            "hits_at_10": hits[10],
            "coverage_at_1": cov[1],
            "coverage_at_3": cov[3],
            "coverage_at_5": cov[5],
            "coverage_at_10": cov[10],
        }
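

# A minimal usage sketch with made-up predictions and targets; the real
# CUEBench pipeline is assumed to call _compute with `outputs` in this shape.
if __name__ == "__main__":
    metric = CUEBenchMetric()
    outputs = [
        {
            "predicted_classes": ['**"Class: sepsis"**', "pneumonia", "renal failure"],
            "target_classes": ["pneumonia"],
        },
        {
            "predicted_classes": ["hypertension", "diabetes"],
            "target_classes": ["diabetes", "obesity"],
        },
    ]
    print(metric._compute(outputs))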