from __future__ import annotations

import itertools
import logging
from collections import defaultdict
from typing import Any

import numpy as np
from sklearn.base import ClassifierMixin, clone
from sklearn.metrics import f1_score, label_ranking_average_precision_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MultiLabelBinarizer

from ..MTEBResults import HFSubset, ScoresDict
from .AbsTask import AbsTask

logger = logging.getLogger(__name__)


def evaluate_classifier(
embeddings_train: np.ndarray,
y_train: np.ndarray,
embeddings_test: np.ndarray,
y_test: np.ndarray,
classifier: ClassifierMixin,
):
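    """Fit a fresh clone of `classifier` on the train embeddings and score its
    predictions on the test embeddings (accuracy, macro-F1, and LRAP)."""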
scores = {}
classifier = clone(classifier)
classifier.fit(embeddings_train, y_train)
y_pred = classifier.predict(embeddings_test)
accuracy = classifier.score(embeddings_test, y_test)
f1 = f1_score(y_test, y_pred, average="macro")
scores["accuracy"] = accuracy
scores["f1"] = f1
lrap = label_ranking_average_precision_score(y_test, y_pred)
scores["lrap"] = lrap
    return scores


class AbsTaskMultilabelClassification(AbsTask):
"""Abstract class for multioutput classification tasks
The similarity is computed between pairs and the results are ranked.
self.load_data() must generate a huggingface dataset with a split matching self.metadata_dict["eval_splits"], and assign it to self.dataset. It must contain the following columns:
text: str
label: list[Hashable]
"""
    classifier = KNeighborsClassifier(n_neighbors=5)

def __init__(
self,
        n_experiments: int | None = None,
        samples_per_label: int | None = None,
        batch_size: int = 32,
**kwargs,
):
super().__init__(**kwargs)
self.batch_size = batch_size
# Bootstrap parameters
self.n_experiments = n_experiments or getattr(self, "n_experiments", 10)
self.samples_per_label = samples_per_label or getattr(
self, "samples_per_label", 8
)
        # Run metadata validation by accessing the attribute.
        # This is quite hacky. Ideally, this would be done in the constructor of
        # each concrete task, but then we would have to duplicate the __init__
        # method's interface.
        if hasattr(self, "metadata"):
            self.metadata

def _add_main_score(self, scores):
scores["main_score"] = scores[self.metadata.main_score]
def evaluate(
self, model, eval_split="test", train_split="train", **kwargs
) -> dict[HFSubset, ScoresDict]:
if not self.data_loaded:
self.load_data()
scores = {}
        hf_subsets = list(self.dataset) if self.is_multilingual else ["default"]
for hf_subset in hf_subsets:
logger.info(
f"\nTask: {self.metadata.name}, split: {eval_split}, subset: {hf_subset}. Running..."
)
if hf_subset not in self.dataset and hf_subset == "default":
ds = self.dataset
else:
ds = self.dataset[hf_subset]
scores[hf_subset] = self._evaluate_subset(
model, ds, eval_split, train_split, **kwargs
)
self._add_main_score(scores[hf_subset])
        return scores

def _evaluate_subset(
self, model, dataset, eval_split="test", train_split="train", **kwargs
) -> ScoresDict:
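        # Resolve the split names into the actual train and evaluation datasets.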
train_split = dataset[train_split]
eval_split = dataset[eval_split]
params = {
"classifier_type": type(self.classifier).__name__,
"classifier_params": self.classifier.get_params(),
"batch_size": self.batch_size,
}
params.update(kwargs)
scores = []
# Bootstrap sample indices from training set for each experiment
train_samples = []
for _ in range(self.n_experiments):
sample_indices, _ = self._undersample_data_indices(
train_split["label"], self.samples_per_label, None
)
train_samples.append(sample_indices)
# Encode all unique sentences at the indices
unique_train_indices = list(set(itertools.chain.from_iterable(train_samples)))
unique_train_sentences = train_split.select(unique_train_indices)["text"]
unique_train_embeddings = dict(
zip(unique_train_indices, model.encode(unique_train_sentences))
)
test_text = eval_split["text"]
binarizer = MultiLabelBinarizer()
y_test = binarizer.fit_transform(eval_split["label"])
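        # Note: the binarizer is fit on the test labels only, so train labels
        # with classes unseen in the test split are ignored (with a warning)
        # by transform() below.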
# Stratified subsampling of test set to 2000 examples.
try:
if len(test_text) > 2000:
test_text, _, y_test, _ = train_test_split(
test_text, y_test, stratify=y_test, train_size=2000
)
except ValueError:
            logger.warning("Couldn't subsample, continuing with the entire test set.")
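        # Encode the (possibly subsampled) test set once; the embeddings are
        # reused across all experiments.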
X_test = model.encode(test_text)
for i_experiment, sample_indices in enumerate(train_samples):
logger.info(
"=" * 10
+ f" Experiment {i_experiment+1}/{self.n_experiments} "
+ "=" * 10
)
X_train = np.stack([unique_train_embeddings[idx] for idx in sample_indices])
y_train = train_split.select(sample_indices)["label"]
y_train = binarizer.transform(y_train)
scores_exp = evaluate_classifier(
X_train, y_train, X_test, y_test, self.classifier
)
scores.append(scores_exp)
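        # Average each metric over the bootstrap experiments.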
avg_scores: dict[str, Any] = {
k: np.mean([s[k] for s in scores]) for k in scores[0].keys()
}
avg_scores["scores_per_experiment"] = scores
        return avg_scores

def _undersample_data_indices(self, y, samples_per_label, idxs=None):
"""Undersample data to have samples_per_label samples of each label"""
sample_indices = []
if idxs is None:
idxs = np.arange(len(y))
np.random.shuffle(idxs)
label_counter = defaultdict(int)
for i in idxs:
if any((label_counter[label] < samples_per_label) for label in y[i]):
sample_indices.append(i)
for label in y[i]:
label_counter[label] += 1
return sample_indices, idxs