from __future__ import annotations

import logging
from collections import defaultdict
from typing import Any

import numpy as np

from ..evaluation.evaluators import (
    kNNClassificationEvaluator,
    kNNClassificationEvaluatorPytorch,
    logRegClassificationEvaluator,
)
from ..MTEBResults import HFSubset, ScoresDict
from .AbsTask import AbsTask

logger = logging.getLogger(__name__)


class AbsTaskClassification(AbsTask):
| """Abstract class for kNN classification tasks |
| The similarity is computed between pairs and the results are ranked. |
| |
| self.load_data() must generate a huggingface dataset with a split matching self.metadata_dict["eval_splits"], and assign it to self.dataset. It |
| must contain the following columns: |
| text: str |
| label: int |
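
    Example of a concrete subclass (illustrative only; the TaskMetadata
    fields shown are assumptions, not the full schema):

        class ToyClassification(AbsTaskClassification):
            metadata = TaskMetadata(
                name="ToyClassification",
                main_score="accuracy",
                eval_splits=["test"],
                ...
            )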
| """ |
|
|
| def __init__( |
| self, |
| method: str = "logReg", |
| n_experiments: int | None = None, |
| samples_per_label: int | None = None, |
| k: int = 3, |
| batch_size: int = 32, |
| **kwargs, |
| ): |
| super().__init__(**kwargs) |
| self.batch_size = batch_size |
| self.method = method |
|
|
| |
| self.n_experiments: int = ( |
| n_experiments |
| if n_experiments is not None |
| else self.metadata_dict.get("n_experiments", 10) |
| ) |
| self.samples_per_label: int = ( |
| samples_per_label |
| if samples_per_label is not None |
| else self.metadata_dict.get("samples_per_label", 8) |
| ) |
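        # Illustrative resolution order (hypothetical values): with
        # metadata_dict == {"n_experiments": 5}, AbsTaskClassification()
        # resolves n_experiments to 5; AbsTaskClassification(n_experiments=2)
        # resolves it to 2; with neither source, the fallback of 10 applies.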

        # Number of neighbours used by the kNN-based evaluators.
        self.k = k

        # Touch the metadata property so that any validation it performs
        # runs at construction time rather than during evaluation.
        if hasattr(self, "metadata"):
            self.metadata

    def _add_main_score(self, scores: ScoresDict) -> None:
        scores["main_score"] = scores[self.metadata.main_score]

    def evaluate(
        self, model, eval_split="test", train_split="train", **kwargs
    ) -> dict[HFSubset, ScoresDict]:
        if not self.data_loaded:
            self.load_data()

        scores = {}
        hf_subsets = list(self.dataset) if self.is_multilingual else ["default"]

        for hf_subset in hf_subsets:
            logger.info(
                f"\nTask: {self.metadata.name}, split: {eval_split}, subset: {hf_subset}. Running..."
            )

            # Monolingual tasks store the dataset directly rather than
            # keyed by subset.
            if hf_subset not in self.dataset and hf_subset == "default":
                ds = self.dataset
            else:
                ds = self.dataset[hf_subset]
            scores[hf_subset] = self._evaluate_subset(
                model, ds, eval_split, train_split, **kwargs
            )
            self._add_main_score(scores[hf_subset])

        return scores
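    # `evaluate` returns one ScoresDict per subset, each produced by
    # `_evaluate_subset` below; a sketch of a plausible result
    # (hypothetical metric values):
    #
    #     {"default": {"accuracy": 0.71, "f1": 0.69, "main_score": 0.71,
    #                  "scores_per_experiment": [...]}}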

    def _evaluate_subset(
        self, model, dataset, eval_split="test", train_split="train", **kwargs
    ) -> ScoresDict:
        train_ds = dataset[train_split]
        eval_ds = dataset[eval_split]
        params = {"k": self.k, "batch_size": self.batch_size}
        params.update(kwargs)

        scores = []
        # `test_cache` lets the evaluator reuse the embedded test set across
        # experiments; `idxs` is carried over and reshuffled on each call so
        # every experiment trains on a different sample.
        test_cache, idxs = None, None
        for i in range(self.n_experiments):
            logger.info(
                "=" * 10 + f" Experiment {i+1}/{self.n_experiments} " + "=" * 10
            )
            # Cap the training data at `samples_per_label` examples per class.
            X_sampled, y_sampled, idxs = self._undersample_data(
                train_ds["text"], train_ds["label"], self.samples_per_label, idxs
            )

            if self.method == "kNN":
                evaluator = kNNClassificationEvaluator(
                    X_sampled,
                    y_sampled,
                    eval_ds["text"],
                    eval_ds["label"],
                    **params,
                )
            elif self.method == "kNN-pytorch":
                evaluator = kNNClassificationEvaluatorPytorch(
                    X_sampled,
                    y_sampled,
                    eval_ds["text"],
                    eval_ds["label"],
                    **params,
                )
            elif self.method == "logReg":
                evaluator = logRegClassificationEvaluator(
                    X_sampled,
                    y_sampled,
                    eval_ds["text"],
                    eval_ds["label"],
                    **params,
                )
            else:
                raise ValueError(f"Method {self.method} not supported")

            scores_exp, test_cache = evaluator(model, test_cache=test_cache)
            scores.append(scores_exp)

        # Average every metric over the experiments, keeping the raw
        # per-experiment scores alongside for inspection.
        avg_scores: dict[str, Any] = {
            k: np.mean([s[k] for s in scores]) for k in scores[0].keys()
        }
        avg_scores["scores_per_experiment"] = scores
        return avg_scores
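    # Averaging sketch (hypothetical numbers): experiments scoring
    # {"accuracy": 0.70} and {"accuracy": 0.74} average to
    # {"accuracy": 0.72, "scores_per_experiment": [{...}, {...}]}.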

    def _undersample_data(self, X, y, samples_per_label: int, idxs=None):
        """Undersample the data to at most `samples_per_label` examples per label.

        The shuffled index array is returned so callers can pass it back in,
        avoiding a re-allocation on subsequent calls.
        """
        X_sampled = []
        y_sampled = []
        if idxs is None:
            idxs = np.arange(len(y))
        # Reshuffle on every call so repeated experiments draw different samples.
        np.random.shuffle(idxs)
        label_counter = defaultdict(int)
        for i in idxs:
            if label_counter[y[i]] < samples_per_label:
                X_sampled.append(X[i])
                y_sampled.append(y[i])
                label_counter[y[i]] += 1
        return X_sampled, y_sampled, idxs
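

if __name__ == "__main__":
    # A minimal, self-contained sketch of the undersampling step on toy data.
    # Illustrative only: the texts and labels are hypothetical, and real tasks
    # are executed through the MTEB runner rather than invoked like this.
    toy_X = ["t0", "t1", "t2", "t3", "t4", "t5"]
    toy_y = [0, 0, 0, 1, 1, 1]
    sampled_X, sampled_y, _ = AbsTaskClassification._undersample_data(
        None, toy_X, toy_y, samples_per_label=2
    )
    print(sorted(sampled_y))  # -> [0, 0, 1, 1]; two examples kept per label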