from __future__ import annotations
from collections import defaultdict
import datasets
from mteb.abstasks.TaskMetadata import TaskMetadata
from ....abstasks import AbsTaskRetrieval, MultilingualTask
from ....abstasks.AbsTaskRetrieval import *
_LANGUAGES = {
"fas": ["fas-Arab"],
"rus": ["rus-Cyrl"],
"zho": ["zho-Hans"],
}
def load_neuclir_data(
    path: str,
    langs: list,
    eval_splits: list,
    cache_dir: str | None = None,
    revision: str | None = None,
):
    """Load NeuCLIR corpus, queries and qrels for each requested language.

    For every language three Hub configs are fetched: ``corpus-{lang}``,
    ``queries-{lang}`` and ``{lang}`` (the qrels). The dataset itself only
    ships a ``"test"`` split, so every returned mapping is keyed by language
    and then by the literal split name ``"test"``.

    Args:
        path: Hugging Face Hub dataset path (e.g. ``"mteb/neuclir-2023"``).
        langs: language codes to load (e.g. ``["fas", "rus", "zho"]``).
        eval_splits: evaluation split names. NOTE(review): only ``"test"`` is
            ever populated below, matching the Hub dataset's layout.
        cache_dir: optional ``datasets`` cache directory.
        revision: optional dataset revision (commit hash) to pin.

    Returns:
        Tuple ``(corpus, queries, relevant_docs)`` of ``DatasetDict``s:
        ``corpus[lang]["test"][doc_id] -> {"text": ..., "title": ...}``,
        ``queries[lang]["test"][query_id] -> query text``,
        ``relevant_docs[lang]["test"][query_id][doc_id] -> relevance score``.
    """
    # Build per-language dicts directly; the previous per-split ``None``
    # pre-initialization was dead for corpus/queries (fully overwritten) and
    # left stale ``None`` entries in relevant_docs for non-"test" splits.
    corpus = {}
    queries = {}
    relevant_docs = {}
    for lang in langs:
        lang_corpus = datasets.load_dataset(
            path, f"corpus-{lang}", cache_dir=cache_dir, revision=revision
        )["corpus"]
        lang_queries = datasets.load_dataset(
            path, f"queries-{lang}", cache_dir=cache_dir, revision=revision
        )["queries"]
        lang_qrels = datasets.load_dataset(
            path, f"{lang}", cache_dir=cache_dir, revision=revision
        )["test"]
        corpus[lang] = {
            "test": {
                str(e["_id"]): {"text": e["text"], "title": e["title"]}
                for e in lang_corpus
            }
        }
        queries[lang] = {"test": {str(e["_id"]): e["text"] for e in lang_queries}}
        # A query may have many judged documents; group scores per query id.
        qrels: defaultdict = defaultdict(dict)
        for item in lang_qrels:
            qrels[str(item["query-id"])][str(item["corpus-id"])] = item["score"]
        relevant_docs[lang] = {"test": qrels}
    return (
        datasets.DatasetDict(corpus),
        datasets.DatasetDict(queries),
        datasets.DatasetDict(relevant_docs),
    )
class NeuCLIR2023Retrieval(MultilingualTask, AbsTaskRetrieval):
    """Multilingual document retrieval task built on the TREC 2023 NeuCLIR track."""

    metadata = TaskMetadata(
        name="NeuCLIR2023Retrieval",
        description="The task involves identifying and retrieving the documents that are relevant to the queries.",
        reference="https://neuclir.github.io/",
        dataset={
            "path": "mteb/neuclir-2023",
            "revision": "dfad7cc7fe4064d6568d6b7d43b99e3a0246d29b",
        },
        type="Retrieval",
        category="s2p",
        eval_splits=["test"],
        eval_langs=_LANGUAGES,
        main_score="ndcg_at_20",
        date=("2022-08-01", "2023-06-30"),
        form=["written"],
        domains=["News"],
        task_subtypes=[],
        license="odc-by",
        socioeconomic_status="medium",
        annotations_creators="expert-annotated",
        dialect=[],
        text_creation="found",
        bibtex_citation="""@misc{lawrie2024overview,
      title={Overview of the TREC 2023 NeuCLIR Track},
      author={Dawn Lawrie and Sean MacAvaney and James Mayfield and Paul McNamee and Douglas W. Oard and Luca Soldaini and Eugene Yang},
      year={2024},
      eprint={2404.08071},
      archivePrefix={arXiv},
      primaryClass={cs.IR}
}""",
        n_samples={"fas": 2232092, "zho": 3179285, "rus": 4627619},
        avg_character_length={
            "fas": 3579.508213937439,
            "zho": 2704.44834488453,
            "rus": 3466.8192213553616,
        },
    )

    def load_data(self, **kwargs):
        """Fetch corpus, queries and qrels from the Hub; no-op if already loaded."""
        if self.data_loaded:
            return
        dataset_info = self.metadata_dict["dataset"]
        self.corpus, self.queries, self.relevant_docs = load_neuclir_data(
            path=dataset_info["path"],
            langs=self.metadata.eval_langs,
            eval_splits=self.metadata_dict["eval_splits"],
            cache_dir=kwargs.get("cache_dir"),
            revision=dataset_info["revision"],
        )
        self.data_loaded = True