File size: 3,360 Bytes
73cc8d2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
from __future__ import annotations

import datasets

from mteb.abstasks import MultilingualTask
from mteb.abstasks.AbsTaskPairClassification import AbsTaskPairClassification
from mteb.abstasks.TaskMetadata import TaskMetadata

# Mapping of the dataset's `language` column values (ISO 639-1 codes, used by
# the filter in `dataset_transform`) to mteb eval_langs tags (ISO 639-3 + script).
_LANGS = {
    "de": ["deu-Latn"],
    "en": ["eng-Latn"],
    "fr": ["fra-Latn"],
    "it": ["ita-Latn"],
}


class RTE3(MultilingualTask, AbsTaskPairClassification):
    """Multilingual RTE-3 pair-classification task.

    Loads the `maximoss/rte3-multi` corpus, keeps only entailment and
    contradiction pairs, and exposes them per language as a binary
    pair-classification problem (1 = entailment, 0 = contradiction).
    """

    metadata = TaskMetadata(
        name="RTE3",
        dataset={
            "path": "maximoss/rte3-multi",
            "revision": "d94f96ca5a6798e20f5a77e566f7a288dc6138d7",
        },
        description="Recognising Textual Entailment Challenge (RTE-3) aim to provide the NLP community with a benchmark to test progress in recognizing textual entailment",
        reference="https://aclanthology.org/W07-1401/",
        category="s2s",
        type="PairClassification",
        eval_splits=["test"],
        eval_langs=_LANGS,
        main_score="ap",
        date=("2023-03-25", "2024-04-15"),
        form=["written"],
        domains=["News", "Web", "Encyclopaedic"],
        task_subtypes=["Textual Entailment"],
        license="cc-by-4.0",
        socioeconomic_status="mixed",
        annotations_creators="expert-annotated",
        dialect=[],
        text_creation="found",
        bibtex_citation="""@inproceedings{giampiccolo-etal-2007-third,
            title = "The Third {PASCAL} Recognizing Textual Entailment Challenge",
            author = "Giampiccolo, Danilo  and
            Magnini, Bernardo  and
            Dagan, Ido  and
            Dolan, Bill",
            booktitle = "Proceedings of the {ACL}-{PASCAL} Workshop on Textual Entailment and Paraphrasing",
            month = jun,
            year = "2007",
            address = "Prague",
            publisher = "Association for Computational Linguistics",
            url = "https://aclanthology.org/W07-1401",
            pages = "1--9",
        }
        """,
        n_samples={"test": 1923},  # sum of 4 languages after neutral filtering
        avg_character_length={"test": 124.79},
    )

    def load_data(self, **kwargs):
        """Download the corpus from the HuggingFace hub (idempotent), then
        transform it into the per-language layout expected by the evaluator."""
        if self.data_loaded:
            return
        meta = self.metadata.dataset
        self.dataset = datasets.load_dataset(meta["path"], revision=meta["revision"])
        self.dataset_transform()
        self.data_loaded = True

    def dataset_transform(self):
        """Reshape the raw splits into {lang: {split: [columns]}} form.

        Neutral pairs (label 1) are dropped; the remaining labels are
        binarized so that entailment (raw 0) -> 1 and contradiction
        (raw 2) -> 0.
        """
        reshaped = {}
        for lang in self.langs:
            per_split = {}
            for split in self.metadata.eval_splits:
                # Restrict the split to the current language.
                rows = self.dataset[split].filter(lambda ex: ex["language"] == lang)
                # Drop neutral pairs, keeping only entailment (0) and contradiction (2).
                rows = rows.filter(lambda ex: ex["label"] in (0, 2))
                # Binarize: raw 0 (entailment) -> 1, raw 2 (contradiction) -> 0.
                rows = rows.map(lambda ex: {"label": int(ex["label"] == 0)})
                per_split[split] = [
                    {
                        "sent1": rows["premise"],
                        "sent2": rows["hypothesis"],
                        "labels": rows["label"],
                    }
                ]
            reshaped[lang] = per_split
        self.dataset = reshaped