from __future__ import annotations
import datasets
from mteb.abstasks.TaskMetadata import TaskMetadata
from ....abstasks import AbsTaskPairClassification, MultilingualTask
# Language subsets of the Opusparcus corpus: maps the HF dataset's two-letter
# subset code (ISO 639-1) to mteb's ISO 639-3 + script identifier list.
_LANGUAGES = {
    "de": ["deu-Latn"],
    "en": ["eng-Latn"],
    "fi": ["fin-Latn"],
    "fr": ["fra-Latn"],
    "ru": ["rus-Cyrl"],
    "sv": ["swe-Latn"],
}
class OpusparcusPC(AbsTaskPairClassification, MultilingualTask):
    """Paraphrase-pair classification on the Opusparcus corpus.

    Opusparcus provides movie/TV-subtitle sentence pairs in six European
    languages, each pair annotated with a paraphrase quality score in
    [1.0, 4.0]. The scores are binarized here: < 2.5 -> 0 (not a
    paraphrase), > 2.5 -> 1, and pairs scored exactly 2.5 (neutral) are
    dropped entirely.
    """

    metadata = TaskMetadata(
        name="OpusparcusPC",
        dataset={
            "path": "GEM/opusparcus",
            "revision": "9e9b1f8ef51616073f47f306f7f47dd91663f86a",
        },
        description="Opusparcus is a paraphrase corpus for six European language: German, English, Finnish, French, Russian, and Swedish. The paraphrases consist of subtitles from movies and TV shows.",
        reference="https://gem-benchmark.com/data_cards/opusparcus",
        category="s2s",
        type="PairClassification",
        eval_splits=["test.full", "validation.full"],
        eval_langs=_LANGUAGES,
        main_score="ap",
        date=None,
        form=None,
        domains=None,
        task_subtypes=None,
        license=None,
        socioeconomic_status=None,
        annotations_creators=None,
        dialect=None,
        text_creation=None,
        bibtex_citation=None,
        n_samples=None,
        avg_character_length=None,
    )

    def load_data(self, **kwargs):
        """Load every language subset from the HuggingFace hub and binarize it.

        Populates ``self.dataset`` with one HF ``DatasetDict`` per language
        in ``self.hf_subsets`` and sets ``self.data_loaded``. Idempotent:
        returns immediately if data was already loaded.
        """
        if self.data_loaded:
            return
        self.dataset = {}
        for lang in self.hf_subsets:
            # quality=100 requests the highest-quality Opusparcus subset
            # offered by the GEM/opusparcus loading script.
            self.dataset[lang] = datasets.load_dataset(
                lang=lang,
                quality=100,
                **self.metadata_dict["dataset"],
            )
            self.dataset_transform(lang)
        self.data_loaded = True

    def dataset_transform(self, lang):
        """Binarize ``annot_score`` and drop neutral pairs for one language.

        Scores below 2.5 become label 0, scores above 2.5 become label 1,
        and pairs scored exactly 2.5 are discarded. Each split is replaced
        by a new ``datasets.Dataset`` whose columns are wrapped in an outer
        list (``[[...]]``), the shape AbsTaskPairClassification expects.
        """
        for split in self.dataset[lang]:
            scores = self.dataset[lang][split]["annot_score"]
            sent1 = self.dataset[lang][split]["input"]
            sent2 = self.dataset[lang][split]["target"]
            # Single O(n) filtering pass; the previous implementation
            # collected neutral indices and deleted them one by one with
            # `del list[i]`, which is O(n) per deletion (O(n^2) overall).
            labels, kept1, kept2 = [], [], []
            for score, s1, s2 in zip(scores, sent1, sent2):
                if score == 2.5:  # neutral annotation: drop the pair
                    continue
                labels.append(0 if score < 2.5 else 1)
                kept1.append(s1)
                kept2.append(s2)
            self.dataset[lang][split] = datasets.Dataset.from_dict(
                {"labels": [labels], "sent1": [kept1], "sent2": [kept2]}
            )