from __future__ import annotations
import datasets
from mteb.abstasks.TaskMetadata import TaskMetadata
from ....abstasks import AbsTaskRetrieval, MultilingualTask
from ....abstasks.AbsTaskRetrieval import *
# Mapping from the MLDR dataset's two-letter language configs to mteb's
# ISO 639-3 + ISO 15924 language codes (e.g. "en" -> "eng-Latn").
# Keys double as the Hugging Face config names used in load_mldr_data.
_LANGUAGES = {
    "ar": ["ara-Arab"],
    "de": ["deu-Latn"],
    "en": ["eng-Latn"],
    "es": ["spa-Latn"],
    "fr": ["fra-Latn"],
    "hi": ["hin-Deva"],
    "it": ["ita-Latn"],
    "ja": ["jpn-Jpan"],
    "ko": ["kor-Hang"],
    "pt": ["por-Latn"],
    "ru": ["rus-Cyrl"],
    "th": ["tha-Thai"],
    "zh": ["cmn-Hans"],
}
def load_mldr_data(
    path: str,
    langs: list,
    eval_splits: list,
    cache_dir: str | None = None,
    revision: str | None = None,
):
    """Load the MLDR corpus, queries, and relevance judgments per language.

    Args:
        path: Hugging Face dataset path (e.g. "Shitao/MLDR").
        langs: Language configs to load (keys of ``_LANGUAGES``).
        eval_splits: Splits (e.g. ["dev", "test"]) to load for each language.
        cache_dir: Optional local cache directory for ``datasets.load_dataset``.
        revision: Optional dataset revision (commit hash) to pin for
            reproducibility.

    Returns:
        A ``(corpus, queries, relevant_docs)`` tuple, each a
        ``datasets.DatasetDict`` keyed first by language, then by split.
    """
    corpus = {lang: {split: None for split in eval_splits} for lang in langs}
    queries = {lang: {split: None for split in eval_splits} for lang in langs}
    relevant_docs = {lang: {split: None for split in eval_splits} for lang in langs}
    for lang in langs:
        lang_corpus = datasets.load_dataset(
            path, f"corpus-{lang}", cache_dir=cache_dir, revision=revision
        )["corpus"]
        lang_corpus = {e["docid"]: {"text": e["text"]} for e in lang_corpus}
        # Fix: this load previously omitted `revision`, so queries/qrels could
        # come from a different dataset version than the revision-pinned
        # corpus above. Pin both loads to the same revision.
        lang_data = datasets.load_dataset(
            path, lang, cache_dir=cache_dir, revision=revision
        )
        for split in eval_splits:
            # The document corpus is shared by all splits of a language.
            corpus[lang][split] = lang_corpus
            queries[lang][split] = {e["query_id"]: e["query"] for e in lang_data[split]}
            # Each query has its first positive passage marked relevant (score 1).
            relevant_docs[lang][split] = {
                e["query_id"]: {e["positive_passages"][0]["docid"]: 1}
                for e in lang_data[split]
            }
    corpus = datasets.DatasetDict(corpus)
    queries = datasets.DatasetDict(queries)
    relevant_docs = datasets.DatasetDict(relevant_docs)
    return corpus, queries, relevant_docs
class MultiLongDocRetrieval(MultilingualTask, AbsTaskRetrieval):
    """MLDR: multilingual long-document retrieval across 13 languages.

    Introduced with BGE M3-Embedding (https://arxiv.org/abs/2402.03216).
    Evaluated with nDCG@10 on the "dev" and "test" splits.
    """

    metadata = TaskMetadata(
        name="MultiLongDocRetrieval",
        description="MultiLongDocRetrieval",
        reference="https://arxiv.org/abs/2402.03216",
        dataset={
            "path": "Shitao/MLDR",
            # Pinned commit of the HF dataset for reproducible evaluation.
            "revision": "d67138e705d963e346253a80e59676ddb418810a",
        },
        type="Retrieval",
        category="s2p",  # sentence-to-paragraph (short query, long document)
        eval_splits=["dev", "test"],
        eval_langs=_LANGUAGES,
        main_score="ndcg_at_10",
        date=None,
        form=None,
        domains=None,
        task_subtypes=None,
        license=None,
        socioeconomic_status=None,
        annotations_creators=None,
        dialect=None,
        text_creation=None,
        bibtex_citation="""@misc{bge-m3,
  title={BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation},
  author={Jianlv Chen and Shitao Xiao and Peitian Zhang and Kun Luo and Defu Lian and Zheng Liu},
  year={2024},
  eprint={2402.03216},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}
""",
        n_samples=None,
        avg_character_length=None,
    )

    def load_data(self, **kwargs):
        """Populate ``self.corpus``/``self.queries``/``self.relevant_docs``.

        Delegates to :func:`load_mldr_data`; no-op if already loaded.
        Accepts an optional ``cache_dir`` keyword argument.
        """
        if self.data_loaded:
            return
        self.corpus, self.queries, self.relevant_docs = load_mldr_data(
            path=self.metadata_dict["dataset"]["path"],
            langs=self.metadata.eval_langs,  # dict; iterated as language keys
            eval_splits=self.metadata_dict["eval_splits"],
            cache_dir=kwargs.get("cache_dir", None),
            revision=self.metadata_dict["dataset"]["revision"],
        )
        self.data_loaded = True