from __future__ import annotations
from mteb.abstasks.TaskMetadata import TaskMetadata
from ....abstasks import MultilingualTask
from ....abstasks.AbsTaskPairClassification import AbsTaskPairClassification
class PawsX(MultilingualTask, AbsTaskPairClassification):
    """PAWS-X multilingual paraphrase-pair classification task.

    Wraps the HuggingFace ``paws-x`` dataset (7 language subsets) and
    repacks it into the pair-classification format expected by
    ``AbsTaskPairClassification`` via :meth:`dataset_transform`.
    """

    metadata = TaskMetadata(
        name="PawsX",
        dataset={
            "path": "paws-x",
            "revision": "8a04d940a42cd40658986fdd8e3da561533a3646",
        },
        description="",
        reference="https://arxiv.org/abs/1908.11828",
        category="s2s",
        type="PairClassification",
        eval_splits=["test", "validation"],
        eval_langs={
            "de": ["deu-Latn"],
            "en": ["eng-Latn"],
            "es": ["spa-Latn"],
            "fr": ["fra-Latn"],
            "ja": ["jpn-Hira"],
            "ko": ["kor-Hang"],
            "zh": ["cmn-Hans"],
        },
        main_score="ap",
        date=None,
        form=None,
        domains=None,
        task_subtypes=None,
        license=None,
        socioeconomic_status=None,
        annotations_creators=None,
        dialect=None,
        text_creation=None,
        bibtex_citation=None,
        n_samples=None,
        avg_character_length=None,
    )

    def dataset_transform(self):
        """Repack the raw HF columns into the pair-classification shape.

        For every language subset and eval split, replaces the HF dataset
        object with a single-element list holding one dict of parallel
        column lists: ``sent1`` / ``sent2`` (the sentence pair columns)
        and ``labels`` (the paraphrase label column).
        """
        # NOTE: the comprehension reads the old self.dataset fully before
        # the rebinding happens, so in-place replacement is safe.
        self.dataset = {
            lang: {
                split: [
                    {
                        "sent1": self.dataset[lang][split]["sentence1"],
                        "sent2": self.dataset[lang][split]["sentence2"],
                        "labels": self.dataset[lang][split]["label"],
                    }
                ]
                for split in self.metadata.eval_splits
            }
            for lang in self.hf_subsets
        }
|