from __future__ import annotations
import logging
from collections import defaultdict
from typing import Any
import numpy as np
from ..evaluation.evaluators import (
kNNClassificationEvaluator,
kNNClassificationEvaluatorPytorch,
logRegClassificationEvaluator,
)
from ..MTEBResults import HFSubset, ScoresDict
from .AbsTask import AbsTask

logger = logging.getLogger(__name__)


class AbsTaskClassification(AbsTask):
"""Abstract class for kNN classification tasks
The similarity is computed between pairs and the results are ranked.
self.load_data() must generate a huggingface dataset with a split matching self.metadata_dict["eval_splits"], and assign it to self.dataset. It
must contain the following columns:
text: str
label: int
"""

    def __init__(
self,
method: str = "logReg",
n_experiments: int | None = None,
samples_per_label: int | None = None,
k: int = 3,
batch_size: int = 32,
**kwargs,
):
super().__init__(**kwargs)
self.batch_size = batch_size
self.method = method
# Bootstrap parameters
self.n_experiments: int = ( # type: ignore
n_experiments
if n_experiments is not None
else self.metadata_dict.get("n_experiments", 10)
)
self.samples_per_label: int = ( # type: ignore
samples_per_label
if samples_per_label is not None
else self.metadata_dict.get("samples_per_label", 8)
)
# kNN parameters
self.k = k
        # Run metadata validation by accessing the attribute.
# This is quite hacky. Ideally, this would be done in the constructor of
# each concrete task, but then we have to duplicate the __init__ method's
# interface.
if hasattr(self, "metadata"):
self.metadata

    def _add_main_score(self, scores: dict[HFSubset, ScoresDict]) -> None:
scores["main_score"] = scores[self.metadata.main_score]

    def evaluate(
self, model, eval_split="test", train_split="train", **kwargs
) -> dict[HFSubset, ScoresDict]:
if not self.data_loaded:
self.load_data()
scores = {}
        hf_subsets = list(self.dataset) if self.is_multilingual else ["default"]
for hf_subset in hf_subsets:
logger.info(
f"\nTask: {self.metadata.name}, split: {eval_split}, subset: {hf_subset}. Running..."
)
if hf_subset not in self.dataset and hf_subset == "default":
ds = self.dataset
else:
ds = self.dataset[hf_subset]
scores[hf_subset] = self._evaluate_subset(
model, ds, eval_split, train_split, **kwargs
)
self._add_main_score(scores[hf_subset])
return scores

    def _evaluate_subset(
self, model, dataset, eval_split="test", train_split="train", **kwargs
) -> ScoresDict:
train_split = dataset[train_split]
eval_split = dataset[eval_split]
params = {"k": self.k, "batch_size": self.batch_size}
params.update(kwargs)
scores = []
        # Cache the test-split embeddings across experiments and keep the shuffled
        # idxs so the per-experiment sampling is reproducible.
        test_cache, idxs = None, None
for i in range(self.n_experiments):
logger.info(
"=" * 10 + f" Experiment {i+1}/{self.n_experiments} " + "=" * 10
)
# Bootstrap `self.samples_per_label` samples per label for each split
X_sampled, y_sampled, idxs = self._undersample_data(
train_split["text"], train_split["label"], self.samples_per_label, idxs
)
if self.method == "kNN":
evaluator = kNNClassificationEvaluator(
X_sampled,
y_sampled,
eval_split["text"],
eval_split["label"],
**params,
)
elif self.method == "kNN-pytorch":
evaluator = kNNClassificationEvaluatorPytorch(
X_sampled,
y_sampled,
eval_split["text"],
eval_split["label"],
**params,
)
elif self.method == "logReg":
evaluator = logRegClassificationEvaluator(
X_sampled,
y_sampled,
eval_split["text"],
eval_split["label"],
**params,
)
else:
raise ValueError(f"Method {self.method} not supported")
scores_exp, test_cache = evaluator(model, test_cache=test_cache)
scores.append(scores_exp)
avg_scores: dict[str, Any] = {
k: np.mean([s[k] for s in scores]) for k in scores[0].keys()
}
avg_scores["scores_per_experiment"] = scores
return avg_scores

    def _undersample_data(self, X, y, samples_per_label: int, idxs=None):
"""Undersample data to have samples_per_label samples of each label"""
X_sampled = []
y_sampled = []
if idxs is None:
idxs = np.arange(len(y))
np.random.shuffle(idxs)
label_counter = defaultdict(int)
for i in idxs:
if label_counter[y[i]] < samples_per_label:
X_sampled.append(X[i])
y_sampled.append(y[i])
label_counter[y[i]] += 1
return X_sampled, y_sampled, idxs
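
# Illustrative usage sketch (hypothetical model name and task class from the comment
# above; kept as a comment since this module only defines the abstract task): such a
# task is normally run through the MTEB runner rather than by calling evaluate()
# directly.
#
#     from sentence_transformers import SentenceTransformer
#     from mteb import MTEB
#
#     model = SentenceTransformer("all-MiniLM-L6-v2")
#     MTEB(tasks=[ToySentimentClassification()]).run(model, output_folder="results")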