from typing import Dict, List

import datasets

from mteb.abstasks import AbsTaskRetrieval, CrosslingualTask, TaskMetadata

_LANGUAGES = {
    "wmt19.de.fr": ["deu-Latn", "fra-Latn"],
    "wmt19.fr.de": ["fra-Latn", "deu-Latn"],
}


def _build_lang_pair(langs: List[str]) -> str:
    """Builds a language pair separated by a dash.

    e.g., ['eng-Latn', 'deu-Latn'] -> 'eng-deu'.
    """
    return langs[0].split("-")[0] + "-" + langs[1].split("-")[0]


def extend_lang_pairs() -> Dict[str, List[str]]:
    eval_langs = {}
    for langs in _LANGUAGES.values():
        lang_pair = _build_lang_pair(langs)
        eval_langs[lang_pair] = langs
    return eval_langs


_EVAL_LANGS = extend_lang_pairs()
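# With the current _LANGUAGES mapping this resolves to
# {"deu-fra": ["deu-Latn", "fra-Latn"], "fra-deu": ["fra-Latn", "deu-Latn"]}.

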
class CrossLingualSemanticDiscriminationWMT19(AbsTaskRetrieval, CrosslingualTask):
    metadata = TaskMetadata(
        name="CrossLingualSemanticDiscriminationWMT19",
        dataset={
            "path": "Andrianos/clsd_wmt19_21",
            "revision": "9627fbdb39b827ee5c066011ebe1e947cdb137bd",
        },
        description="Evaluate a multilingual embedding model on its ability to discriminate the original parallel pair from challenging distractors, derived from the WMT19 DE-FR test set",
        reference="https://huggingface.co/datasets/Andrianos/clsd_wmt19_21",
        type="Retrieval",
        category="s2s",
        eval_splits=["test"],
        eval_langs=_EVAL_LANGS,
        main_score="recall_at_1",
        date=("2018-01-01", "2023-12-12"),
        form=["written"],
        domains=["News"],
        task_subtypes=["Cross-Lingual Semantic Discrimination"],
        license="CC BY-SA 4.0",
        socioeconomic_status="high",
        annotations_creators="derived",
        dialect=[],
        text_creation="LM-generated and verified",
        bibtex_citation="preprint_coming",
        n_samples={"test": 2946},
        avg_character_length={"test": 161},
    )

    def __init__(self, **kwargs):
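        # Number of distractor translations per source sentence
        # (the TargetAdv1-TargetAdv4 columns in the dataset).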
        self.num_of_distractors = 4
        super().__init__(**kwargs)

    def load_data(self, **kwargs):
        """Generic data loader for the original CLSD datasets in the format shown under the dataset reference link.

        Loads the HF dataset and populates the following three attributes used for retrieval evaluation:
            self.corpus
            self.queries
            self.relevant_docs
        Finally, sets self.data_loaded to True.
        """
        if self.data_loaded:
            return

        queries, corpus, relevant_docs = {}, {}, {}
        dataset_raw = {}
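        # Each of queries / corpus / relevant_docs is built below with the
        # nested layout {lang_pair: {split: {id: ...}}} used for retrieval
        # evaluation.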
        for split in self.metadata.eval_splits:
            for hf_subset, langs in _LANGUAGES.items():
                lang_pair = _build_lang_pair(langs)
                dataset_raw[lang_pair] = datasets.load_dataset(
                    name=hf_subset,
                    **self.metadata_dict["dataset"],
                )[split]

                queries[lang_pair] = {}
                corpus[lang_pair] = {}
                relevant_docs[lang_pair] = {}
                queries[lang_pair][split] = {}
                corpus[lang_pair][split] = {}
                relevant_docs[lang_pair][split] = {}

                # Generate unique IDs for queries and documents
                query_id_counter = 1
                document_id_counter = 1

                for row in dataset_raw[lang_pair]:
                    query_text = row["Source"]
                    positive_text = [row["Target"]]
                    # Four distractors; columns are named TargetAdv1-TargetAdv4
                    negative_texts = [
                        row[f"TargetAdv{i}"]
                        for i in range(1, self.num_of_distractors + 1)
                    ]
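                    # Each source sentence thus contributes its true translation
                    # plus self.num_of_distractors (= 4) distractors to the corpus;
                    # the recall_at_1 main score checks whether the true parallel
                    # sentence is retrieved first.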
                    # Assign a unique ID to the query
                    query_id = f"Q{query_id_counter}"
                    queries[lang_pair][split][query_id] = query_text
                    query_id_counter += 1

                    # Add the true parallel and the distractors to the corpus with unique IDs
                    for text in positive_text + negative_texts:
                        doc_id = f"D{document_id_counter}"
                        corpus[lang_pair][split][doc_id] = {"text": text}
                        document_id_counter += 1
                        # Record relevance information for the true parallel sentence only
                        if text in positive_text:
                            if query_id not in relevant_docs[lang_pair][split]:
                                relevant_docs[lang_pair][split][query_id] = {}
                            relevant_docs[lang_pair][split][query_id][doc_id] = 1

        self.corpus = datasets.DatasetDict(corpus)
        self.queries = datasets.DatasetDict(queries)
        self.relevant_docs = datasets.DatasetDict(relevant_docs)
        self.data_loaded = True
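
# A minimal usage sketch (an illustration, not part of the original module), assuming
# MTEB's standard evaluation entry point and a SentenceTransformer-style model:
#
#   import mteb
#   from sentence_transformers import SentenceTransformer
#
#   model = SentenceTransformer("intfloat/multilingual-e5-small")  # hypothetical model choice
#   evaluation = mteb.MTEB(tasks=[CrossLingualSemanticDiscriminationWMT19()])
#   evaluation.run(model, output_folder="results")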