from __future__ import annotations
import logging
from collections import defaultdict
from datasets import Dataset
from ..encoder_interface import Encoder, EncoderWithQueryCorpusEncode
from ..evaluation.evaluators import PairClassificationEvaluator
from ..MTEBResults import ScoresDict
from .AbsTask import AbsTask
logger = logging.getLogger(__name__)
class AbsTaskPairClassification(AbsTask):
    """Abstract class for PairClassificationTasks.

    The similarity is computed between pairs and the results are ranked. Average
    precision is computed to measure how well the methods can be used for pairwise
    pair classification.

    self.load_data() must generate a huggingface dataset with a split matching
    self.metadata_dict["eval_splits"], and assign it to self.dataset. It must
    contain the following columns:
        sent1: list[str]
        sent2: list[str]
        labels: list[int]
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def _add_main_score(self, scores: ScoresDict) -> None:
        # The main score is the best value of the configured metric across all
        # similarity functions tried by the evaluator (stored under "max").
        scores["main_score"] = scores["max"][self.metadata.main_score]

    def _evaluate_subset(
        self,
        model: Encoder | EncoderWithQueryCorpusEncode,
        dataset: Dataset,
        **kwargs,
    ) -> ScoresDict:
        """Evaluate `model` on one split of the pair-classification dataset.

        Args:
            model: Encoder used to embed both sides of each sentence pair.
            dataset: HF dataset whose first row holds the full `sent1`, `sent2`
                and `labels` column lists for this split.
            **kwargs: Forwarded to `PairClassificationEvaluator`.

        Returns:
            Per-similarity-function metric dicts plus a "max" entry holding the
            best accuracy/f1/ap across similarity functions, and "main_score".
        """
        # The split is stored as a single row containing the whole column lists,
        # so index 0 yields all pairs at once.
        data_split = dataset[0]
        # Silence verbose per-call logging from sentence-transformers' evaluator.
        # NOTE: WARNING is the canonical level name; WARN is a legacy alias.
        logging.getLogger(
            "sentence_transformers.evaluation.PairClassificationEvaluator"
        ).setLevel(logging.WARNING)
        evaluator = PairClassificationEvaluator(
            data_split["sent1"], data_split["sent2"], data_split["labels"], **kwargs
        )
        scores = evaluator.compute_metrics(model)

        # For each metric, keep the best value achieved by any similarity function.
        scores["max"] = {
            metric: max(scores[sim_fct][metric] for sim_fct in scores)
            for metric in ["accuracy", "f1", "ap"]
        }
        self._add_main_score(scores)
        return scores