from __future__ import annotations
import logging
import random
from abc import ABC, abstractmethod
from typing import Any, Sequence
import datasets
import numpy as np
import torch
from datasets import DatasetDict
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from mteb.abstasks.TaskMetadata import TaskMetadata
from mteb.encoder_interface import Encoder, EncoderWithQueryCorpusEncode
from mteb.languages import LanguageScripts
from ..MTEBResults import HFSubset, ScoresDict
logger = logging.getLogger(__name__)
def _multilabel_subsampling(
    dataset_dict: datasets.DatasetDict,
    seed: int,
    splits: list[str] | None = None,
    label: str = "label",
    n_samples: int = 2048,
) -> datasets.DatasetDict:
    """Stratified subsampling for multilabel problems.

    Args:
        dataset_dict: the DatasetDict object to subsample in place.
        seed: random seed passed to the train/test split.
        splits: the splits of the dataset to subsample. Defaults to ["test"].
        label: name of the column holding the label lists.
        n_samples: number of samples to keep per split.

    Returns:
        The DatasetDict with each requested split subsampled. A split is left
        untouched when stratification fails (e.g. a class with one member).
    """
    if splits is None:
        splits = ["test"]
    for split in splits:
        labels = dataset_dict[split][label]
        # Binarize the per-example label lists so sklearn can stratify on them.
        encoded_labels = MultiLabelBinarizer().fit_transform(labels)
        idxs = np.arange(len(labels))
        try:
            idxs, *_ = train_test_split(
                idxs,
                encoded_labels,
                stratify=encoded_labels,
                random_state=seed,
                train_size=n_samples,
            )
        except ValueError:
            # idxs still covers every row, so select() below is a no-op subset.
            logger.warning("Couldn't subsample, continuing with full split.")
        dataset_dict.update({split: dataset_dict[split].select(idxs)})
    return dataset_dict
class AbsTask(ABC):
metadata: TaskMetadata
superseeded_by: None | str = None
def __init__(self, seed: int = 42, **kwargs: Any):
self.dataset = None
self.data_loaded = False
self.is_multilingual = False
self.is_crosslingual = False
self.save_suffix = kwargs.get("save_suffix", "")
self.seed = seed
random.seed(self.seed)
np.random.seed(self.seed)
torch.manual_seed(self.seed)
torch.cuda.manual_seed_all(self.seed)
def check_if_dataset_is_superseeded(self):
"""Check if the dataset is superseeded by a newer version"""
if self.superseeded_by:
logger.warning(
f"Dataset '{self.metadata.name}' is superseeded by '{self.superseeded_by}', you might consider using the newer version of the dataset."
)
def dataset_transform(self):
"""Transform operations applied to the dataset after loading.
Override this method if your dataset requires any transformation.
"""
pass
def evaluate(
self,
model: Encoder | EncoderWithQueryCorpusEncode,
split: str = "test",
**kwargs: Any,
) -> dict[HFSubset, ScoresDict]:
"""Evaluates a Sentence Embedding Model on the task.
Returns a dict (that can be serialized to json).
Args:
model: Sentence embedding method. Implements a encode(sentences) method, that encodes sentences and returns a numpy matrix with the
sentence embeddings
split: Which datasplit to be used.
kwargs: Additional keyword arguments that are passed to the _evaluate_subset method.
"""
if not self.data_loaded:
self.load_data()
self.dataset: dict[HFSubset, DatasetDict]
scores = {}
hf_subsets = (
[l for l in self.dataset.keys()]
if self.is_crosslingual or self.is_multilingual
else ["default"]
)
for hf_subset in hf_subsets:
logger.info(
f"\nTask: {self.metadata_dict['name']}, split: {split}, subset: {hf_subset}. Running..."
)
if hf_subset not in self.dataset and hf_subset == "default":
data_split = self.dataset[split]
else:
data_split = self.dataset[hf_subset][split]
scores[hf_subset] = self._evaluate_subset(model, data_split, **kwargs)
return scores
@abstractmethod
def _evaluate_subset(self, model, data_split, **kwargs) -> ScoresDict:
raise NotImplementedError(
"If you are using the default evaluate method, you must implement _evaluate_subset method."
)
@staticmethod
def stratified_subsampling(
dataset_dict: datasets.DatasetDict,
seed: int,
splits: list[str] = ["test"],
label: str = "label",
n_samples: int = 2048,
) -> datasets.DatasetDict:
"""Subsamples the dataset with stratification by the supplied label.
Returns a datasetDict object.
Args:
dataset_dict: the DatasetDict object.
seed: the random seed.
splits: the splits of the dataset.
label: the label with which the stratified sampling is based on.
n_samples: Optional, number of samples to subsample. Default is max_n_samples.
"""
## Can only do this if the label column is of ClassLabel.
if not isinstance(dataset_dict[splits[0]].features[label], datasets.ClassLabel):
try:
dataset_dict = dataset_dict.class_encode_column(label)
except ValueError as e:
if isinstance(dataset_dict[splits[0]][label][0], Sequence):
return _multilabel_subsampling(
dataset_dict, seed, splits, label, n_samples
)
else:
raise e
for split in splits:
dataset_dict.update(
{
split: dataset_dict[split].train_test_split(
test_size=n_samples, seed=seed, stratify_by_column=label
)["test"]
}
) ## only take the specified test split.
return dataset_dict
def load_data(self, **kwargs):
"""Load dataset from HuggingFace hub"""
if self.data_loaded:
return
self.dataset = datasets.load_dataset(**self.metadata_dict["dataset"]) # type: ignore
self.dataset_transform()
self.data_loaded = True
@property
def metadata_dict(self) -> dict[str, Any]:
return dict(self.metadata)
@property
def languages(self) -> list[str]:
"""Returns the languages of the task"""
# check if self.hf_subsets is set
has_lang_splits = self.is_crosslingual or self.is_multilingual
if has_lang_splits and hasattr(self, "hf_subsets"):
assert isinstance(
self.metadata.eval_langs, dict
), "eval_langs must be dict for multilingual tasks"
eval_langs = self.metadata.eval_langs
languages = []
for lang in self.hf_subsets:
for langscript in eval_langs[lang]:
iso_lang, script = langscript.split("-")
languages.append(iso_lang)
return sorted(set(languages))
return self.metadata.languages
def filter_languages(
self, languages: list[str] | None, script: list[str] | None = None
) -> AbsTask:
"""Filter the languages of the task.
Args:
languages: list of languages to filter the task by can be either a 3-letter langauge code (e.g. "eng") or also include the script
(e.g. "eng-Latn")
script: list of scripts to filter the task by. Will be ignored if language code specified the script. If None, all scripts are included.
If the language code does not specify the script the intersection of the language and script will be used.
"""
lang_scripts = LanguageScripts.from_languages_and_scripts(languages, script)
subsets_to_keep = []
if not isinstance(self.metadata.eval_langs, dict):
self.hf_subsets = self.metadata.eval_langs
return self
for hf_subset, langs in self.metadata.eval_langs.items():
for langscript in langs:
if lang_scripts.contains_language(
langscript
) or lang_scripts.contains_script(langscript):
subsets_to_keep.append(hf_subset)
break
self.hf_subsets = subsets_to_keep
return self
def __repr__(self) -> str:
"""Format the representation of the task such that it appears as:
TaskObjectName(name='{name}', languages={lang1, lang2, ...})
"""
langs = self.languages
if len(langs) > 3:
langs = langs[:3]
langs.append("...")
return (
f"{self.__class__.__name__}(name='{self.metadata.name}', languages={langs})"
)
|