from __future__ import annotations
from collections import defaultdict
import datasets
from mteb.abstasks.TaskMetadata import TaskMetadata
from ....abstasks import AbsTaskRetrieval, MultilingualTask
from ....abstasks.AbsTaskRetrieval import *
# ISO 639-3 language code -> list of code-script tags, the format
# TaskMetadata.eval_langs expects. Also used to derive the per-language
# dataset config names (f"corpus-{lang}", f"queries-{lang}") below.
_LANGUAGES = {
    "fas": ["fas-Arab"],
    "rus": ["rus-Cyrl"],
    "zho": ["zho-Hans"],
}
def load_neuclir_data(
    path: str,
    langs: list,
    eval_splits: list,
    cache_dir: str | None = None,
    revision: str | None = None,
):
    """Download and assemble the NeuCLIR corpora, queries and qrels.

    For each language in ``langs`` three dataset configs are fetched from
    ``path``: ``corpus-{lang}``, ``queries-{lang}`` and ``{lang}`` (the
    qrels). Only the "test" split is populated; any other requested split
    keeps a ``None`` placeholder in ``relevant_docs``.

    Returns a ``(corpus, queries, relevant_docs)`` triple of
    ``datasets.DatasetDict`` objects, each keyed by language and then split.
    """
    corpus = {}
    queries = {}
    relevant_docs = {}

    for lang in langs:
        # Placeholder entries for every requested split; "test" is filled in.
        relevant_docs[lang] = {split: None for split in eval_splits}

        docs = datasets.load_dataset(
            path, f"corpus-{lang}", cache_dir=cache_dir, revision=revision
        )["corpus"]
        corpus[lang] = {
            "test": {
                str(doc["_id"]): {"text": doc["text"], "title": doc["title"]}
                for doc in docs
            }
        }

        topics = datasets.load_dataset(
            path, f"queries-{lang}", cache_dir=cache_dir, revision=revision
        )["queries"]
        queries[lang] = {"test": {str(q["_id"]): q["text"] for q in topics}}

        qrels = datasets.load_dataset(
            path, f"{lang}", cache_dir=cache_dir, revision=revision
        )["test"]
        # A query can have many judged documents; group scores per query id.
        judgements = defaultdict(dict)
        for row in qrels:
            judgements[str(row["query-id"])][str(row["corpus-id"])] = row["score"]
        relevant_docs[lang]["test"] = judgements

    return (
        datasets.DatasetDict(corpus),
        datasets.DatasetDict(queries),
        datasets.DatasetDict(relevant_docs),
    )
class NeuCLIR2022Retrieval(MultilingualTask, AbsTaskRetrieval):
    """Multilingual document retrieval task over the TREC 2022 NeuCLIR data.

    Covers Persian, Russian and Chinese news documents (see ``_LANGUAGES``);
    scored with nDCG@20 on the "test" split.
    """

    # Static task description consumed by the MTEB framework.
    metadata = TaskMetadata(
        name="NeuCLIR2022Retrieval",
        description="The task involves identifying and retrieving the documents that are relevant to the queries.",
        reference="https://neuclir.github.io/",
        dataset={
            "path": "mteb/neuclir-2022",
            "revision": "920fc15b81e2324e52163904be663f340235cdea",
        },
        type="Retrieval",
        category="s2p",
        eval_splits=["test"],
        eval_langs=_LANGUAGES,
        main_score="ndcg_at_20",
        date=("2021-08-01", "2022-06-30"),
        form=["written"],
        domains=["News"],
        task_subtypes=[],
        license="odc-by",
        socioeconomic_status="medium",
        annotations_creators="expert-annotated",
        dialect=[],
        text_creation="found",
        bibtex_citation="""@article{lawrie2023overview,
  title={Overview of the TREC 2022 NeuCLIR track},
  author={Lawrie, Dawn and MacAvaney, Sean and Mayfield, James and McNamee, Paul and Oard, Douglas W and Soldaini, Luca and Yang, Eugene},
  journal={arXiv preprint arXiv:2304.12367},
  year={2023}
}""",
        n_samples={"fas": 2232130, "zho": 3179323, "rus": 4627657},
        avg_character_length={
            "fas": 3500.5143969099317,
            "zho": 2543.1140667919617,
            "rus": 3214.755239654659,
        },
    )

    def load_data(self, **kwargs):
        """Populate corpus/queries/qrels from the HF hub (idempotent)."""
        # Skip the expensive download when the data is already in memory.
        if self.data_loaded:
            return
        self.corpus, self.queries, self.relevant_docs = load_neuclir_data(
            path=self.metadata_dict["dataset"]["path"],
            langs=self.metadata.eval_langs,
            eval_splits=self.metadata_dict["eval_splits"],
            cache_dir=kwargs.get("cache_dir", None),
            revision=self.metadata_dict["dataset"]["revision"],
        )
        self.data_loaded = True