from __future__ import annotations
from datasets import load_dataset
from mteb.abstasks.TaskMetadata import TaskMetadata
from ....abstasks import MultilingualTask
from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval
_EVAL_LANGS = {
"bg": ["bul-Cyrl"],
"bn": ["ben-Beng"],
"cs": ["ces-Latn"],
"da": ["dan-Latn"],
"de": ["deu-Latn"],
"en": ["eng-Latn"],
"fa": ["fas-Arab"],
"fi": ["fin-Latn"],
"hi": ["hin-Deva"],
"it": ["ita-Latn"],
"nl": ["nld-Latn"],
"pt": ["por-Latn"],
"ro": ["ron-Latn"],
"sr": ["srp-Cyrl"],
"no": ["nor-Latn"],
"sv": ["swe-Latn"],
}
# adapted from MIRACLRetrieval
def _load_data(
    path: str,
    langs: list,
    split: str,
    cache_dir: str | None = None,
    revision_queries: str | None = None,
    revision_corpus: str | None = None,
    revision_qrels: str | None = None,
):
    """Load corpus, queries and qrels for every language subset.

    Adapted from MIRACLRetrieval. The queries, corpus and qrels live in
    three sibling HF repositories whose names differ only by the
    "queries" / "corpus" / "qrels" suffix, each pinned to its own revision.

    Args:
        path: HF path of the *queries* dataset; the corpus/qrels paths are
            derived from it by suffix substitution.
        langs: Language subset codes to load (keys of ``_EVAL_LANGS``).
        split: Dataset split name, e.g. ``"test"``.
        cache_dir: Optional HF datasets cache directory.
        revision_queries: Pinned revision of the queries repository.
        revision_corpus: Pinned revision of the corpus repository.
        revision_qrels: Pinned revision of the qrels repository.

    Returns:
        Tuple ``(corpus, queries, qrels)``; each is a dict keyed
        ``lang -> split -> id -> payload``.
    """
    # The sibling paths do not depend on the language: derive them once,
    # outside the loop.
    corpus_path = path.replace("queries", "corpus")
    qrels_path = path.replace("queries", "qrels")

    queries = {lang: {split: {}} for lang in langs}
    corpus = {lang: {split: {}} for lang in langs}
    qrels = {lang: {split: {}} for lang in langs}

    for lang in langs:
        queries_lang = load_dataset(
            path,
            lang,
            split=split,
            cache_dir=cache_dir,
            revision=revision_queries,
        )
        corpus_lang = load_dataset(
            corpus_path,
            lang,
            split=split,
            cache_dir=cache_dir,
            revision=revision_corpus,
        )
        qrels_lang = load_dataset(
            qrels_path,
            lang,
            split=split,
            cache_dir=cache_dir,
            revision=revision_qrels,
        )

        # Don't pass on titles to make the task harder.
        corpus[lang][split] = {
            doc["_id"]: {"text": doc["text"]} for doc in corpus_lang
        }
        queries[lang][split] = {
            query["_id"]: {"text": query["text"]} for query in queries_lang
        }

        qrels_lang_dict: dict = {}
        for qrel in qrels_lang:
            # A score of 0.5 is deliberately dropped (presumably an
            # ambiguous/partial judgement — TODO confirm against the
            # dataset card).
            if qrel["score"] == 0.5:
                continue
            # setdefault merges multiple judgements for the same query;
            # plain assignment would keep only the last (corpus-id, score)
            # pair seen for each query-id.
            qrels_lang_dict.setdefault(qrel["query-id"], {})[
                qrel["corpus-id"]
            ] = int(qrel["score"])
        qrels[lang][split] = qrels_lang_dict

    return corpus, queries, qrels
class WikipediaRetrievalMultilingual(MultilingualTask, AbsTaskRetrieval):
    """Multilingual Wikipedia retrieval task over 16 language subsets.

    Queries are synthetically generated (LM-generated and verified) against
    Cohere's wikipedia-2023-11 corpus; data is loaded per language from
    three sibling HF repositories (queries / corpus / qrels), each pinned
    to its own revision.
    """

    metadata = TaskMetadata(
        name="WikipediaRetrievalMultilingual",
        description="The dataset is derived from Cohere's wikipedia-2023-11 dataset and contains synthetically generated queries.",
        reference="https://huggingface.co/datasets/ellamind/wikipedia-2023-11-retrieval-pt",
        dataset={
            # "path" points at the queries repo; _load_data derives the
            # corpus/qrels repo paths by suffix substitution.
            "path": "ellamind/wikipedia-2023-11-retrieval-multilingual-queries",
            "revision": "3b6ea595c94bac3448a2ad167ca2e06abd340d6e",  # avoid validation error
            # Extra, non-standard keys carrying the pinned revisions of the
            # sibling corpus and qrels repositories.
            "revision_corpus": "f20ac0c449c85358d3d5c72a95f92f1eddc98aa5",
            "revision_qrels": "ec88a7bb2da034d538e98e3122d2c98530ca1c8d",
        },
        type="Retrieval",
        category="s2p",
        eval_splits=["test"],
        eval_langs=_EVAL_LANGS,
        main_score="ndcg_at_10",
        date=("2023-11-01", "2024-05-15"),
        form=["written"],
        domains=["Encyclopaedic"],
        task_subtypes=["Question answering", "Article retrieval"],
        license="cc-by-sa-3.0",
        socioeconomic_status="mixed",
        annotations_creators="LM-generated",
        dialect=[],
        text_creation="LM-generated and verified",
        bibtex_citation="",
        n_samples={
            "en": 1500,
            "de": 1500,
            "it": 1500,
            "pt": 1500,
            "nl": 1500,
            "cs": 1500,
            "ro": 1500,
            "bg": 1500,
            "sr": 1500,
            "fi": 1500,
            "da": 1500,
            "fa": 1500,
            "hi": 1500,
            "bn": 1500,
            "no": 1500,
            "sv": 1500,
        },
        avg_character_length={"test": 452},
    )

    def load_data(self, **kwargs) -> None:
        """Populate ``self.corpus``/``self.queries``/``self.relevant_docs``.

        Idempotent: returns immediately if data was already loaded.
        Delegates the actual HF download to the module-level ``_load_data``.
        """
        if self.data_loaded:
            return
        self.corpus, self.queries, self.relevant_docs = _load_data(
            path=self.metadata_dict["dataset"]["path"],
            langs=self.hf_subsets,
            split=self.metadata_dict["eval_splits"][0],
            cache_dir=kwargs.get("cache_dir", None),
            revision_queries=self.metadata_dict["dataset"]["revision"],
            revision_corpus=self.metadata_dict["dataset"]["revision_corpus"],
            revision_qrels=self.metadata_dict["dataset"]["revision_qrels"],
        )
        self.data_loaded = True