import datasets
from mteb.abstasks.TaskMetadata import TaskMetadata
from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval
class LEMBQMSumRetrieval(AbsTaskRetrieval):
    """LongEmbed qmsum retrieval task: query-based retrieval over long meeting
    transcripts from the QMSum benchmark, evaluated on the "test" split with
    nDCG@10 as the main score."""

    # Single evaluation split exposed by this task.
    _EVAL_SPLIT = "test"

    metadata = TaskMetadata(
        name="LEMBQMSumRetrieval",
        dataset={
            "path": "dwzhu/LongEmbed",
            "revision": "6e346642246bfb4928c560ee08640dc84d074e8c",
            "name": "qmsum",
        },
        reference="https://huggingface.co/datasets/dwzhu/LongEmbed",
        description=("qmsum subset of dwzhu/LongEmbed dataset."),
        type="Retrieval",
        category="s2p",
        eval_splits=[_EVAL_SPLIT],
        eval_langs=["eng-Latn"],
        main_score="ndcg_at_10",
        date=("1950-01-01", "2021-12-31"),
        form=["written"],
        domains=["Spoken"],
        task_subtypes=["Article retrieval"],
        license="Not specified",
        socioeconomic_status="medium",
        annotations_creators="derived",
        dialect=[],
        text_creation="found",
        bibtex_citation="""
@inproceedings{zhong-etal-2021-qmsum,
title = "{QMS}um: A New Benchmark for Query-based Multi-domain Meeting Summarization",
author = "Zhong, Ming and
Yin, Da and
Yu, Tao and
Zaidi, Ahmad and
Mutuma, Mutethia and
Jha, Rahul and
Awadallah, Ahmed Hassan and
Celikyilmaz, Asli and
Liu, Yang and
Qiu, Xipeng and
Radev, Dragomir",
editor = "Toutanova, Kristina and
Rumshisky, Anna and
Zettlemoyer, Luke and
Hakkani-Tur, Dilek and
Beltagy, Iz and
Bethard, Steven and
Cotterell, Ryan and
Chakraborty, Tanmoy and
Zhou, Yichao",
booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.naacl-main.472",
doi = "10.18653/v1/2021.naacl-main.472",
pages = "5905--5921",
abstract = "",
}
""",
        n_samples={_EVAL_SPLIT: 1724},
        avg_character_length={_EVAL_SPLIT: 56136.4},
    )

    def load_data(self, **kwargs):
        """Populate ``self.queries``, ``self.corpus`` and ``self.relevant_docs``
        for the single evaluation split.

        Fix over the original: the dataset was loaded three times (once per
        split accessed); it is now loaded once and indexed for each split.
        """
        if self.data_loaded:
            return

        # One load_dataset call returns all splits of the "qmsum" config.
        dataset = datasets.load_dataset(**self.metadata_dict["dataset"])

        # "queries" split rows: dict_keys(['qid', 'text'])
        queries = {row["qid"]: row["text"] for row in dataset["queries"]}
        # "corpus" split rows: dict_keys(['doc_id', 'text'])
        corpus = {row["doc_id"]: {"text": row["text"]} for row in dataset["corpus"]}
        # "qrels" split rows: dict_keys(['qid', 'doc_id']); every pair is
        # treated as relevant with score 1 (one relevant doc per query here —
        # a later pair with the same qid would overwrite the earlier one).
        qrels = {row["qid"]: {row["doc_id"]: 1} for row in dataset["qrels"]}

        self.corpus = {self._EVAL_SPLIT: corpus}
        self.queries = {self._EVAL_SPLIT: queries}
        self.relevant_docs = {self._EVAL_SPLIT: qrels}
        self.data_loaded = True