from __future__ import annotations
import json
import datasets
from mteb.abstasks.TaskMetadata import TaskMetadata
from ....abstasks import MultilingualTask
from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval
# Dataset splits used for evaluation; the HF repo exposes "dev" and "test".
_EVAL_SPLITS = ["dev", "test"]
# Maps the dataset's language config names to mteb language codes.
_LANGS = {
    # <iso_639_3>-<ISO_15924>
    "english": ["eng-Latn"],
    "french": ["fra-Latn"],
}
def _load_statcan_data(
    path: str,
    langs: list[str],
    splits: list[str],
    cache_dir: str | None = None,
    revision: str | None = None,
):
    """Load queries, corpus and relevance judgments for the StatCan dataset.

    Args:
        path: Hugging Face dataset repository path.
        langs: Language keys to load (e.g. ``["english", "french"]``).
        splits: Evaluation splits to load (e.g. ``["dev", "test"]``).
        cache_dir: Optional ``datasets`` cache directory.
        revision: Optional dataset revision (commit hash) to pin.

    Returns:
        Tuple ``(corpus, queries, relevant_docs)`` — each a
        ``datasets.DatasetDict`` keyed ``[lang][split]``.
    """
    queries = {lang: {split: {} for split in splits} for lang in langs}
    corpus = {lang: {split: {} for split in splits} for lang in langs}
    relevant_docs = {lang: {split: {} for split in splits} for lang in langs}

    for lang in langs:
        # The corpus is split-independent (its HF "split" is the language),
        # so load and convert it once per language instead of once per
        # (split, lang) pair as before.
        corpus_table = datasets.load_dataset(
            path,
            "corpus",
            split=lang,
            cache_dir=cache_dir,
            revision=revision,
        )
        lang_corpus = {row["doc_id"]: {"text": row["doc"]} for row in corpus_table}

        for split in splits:
            # Copy so each split owns an independent mapping, matching the
            # original per-split construction.
            corpus[lang][split] = dict(lang_corpus)

            query_table = datasets.load_dataset(
                path,
                f"queries_{lang}",
                split=split,
                cache_dir=cache_dir,
                revision=revision,
            )
            for row in query_table:
                query_id = row["query_id"]
                # Queries are stored as JSON-encoded strings.
                queries[lang][split][query_id] = json.loads(row["query"])
                # Binary relevance: each query's gold table gets score 1.
                relevant_docs[lang][split].setdefault(query_id, {})[
                    row["doc_id"]
                ] = 1

    corpus = datasets.DatasetDict(corpus)
    queries = datasets.DatasetDict(queries)
    relevant_docs = datasets.DatasetDict(relevant_docs)
    return corpus, queries, relevant_docs
class StatcanDialogueDatasetRetrieval(MultilingualTask, AbsTaskRetrieval):
    """Multilingual conversational retrieval task over StatCan data tables.

    Retrieval of Statistics Canada data tables from genuine conversational
    queries, available in English and French. Data is loaded from the
    McGill-NLP HF repository declared in ``metadata.dataset``.
    """

    metadata = TaskMetadata(
        name="StatcanDialogueDatasetRetrieval",
        description="A Dataset for Retrieving Data Tables through Conversations with Genuine Intents, available in English and French.",
        dataset={
            "path": "McGill-NLP/statcan-dialogue-dataset-retrieval",
            "revision": "7a26938c93e99e0759a1df416896bb72527e2f33",
        },
        type="Retrieval",
        category="s2p",
        eval_splits=_EVAL_SPLITS,
        eval_langs=_LANGS,
        main_score="recall_at_10",
        reference="https://mcgill-nlp.github.io/statcan-dialogue-dataset/",
        date=("2020-01-01", "2020-04-15"),
        form=["written"],
        domains=["Government", "Web"],
        task_subtypes=["Conversational retrieval"],
        license="https://huggingface.co/datasets/McGill-NLP/statcan-dialogue-dataset-retrieval/blob/main/LICENSE.md",
        socioeconomic_status="high",
        annotations_creators="derived",
        dialect=[],
        text_creation="found",
        bibtex_citation="""
@inproceedings{lu-etal-2023-statcan,
    title = "The {S}tat{C}an Dialogue Dataset: Retrieving Data Tables through Conversations with Genuine Intents",
    author = "Lu, Xing Han  and
      Reddy, Siva  and
      de Vries, Harm",
    booktitle = "Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics",
    month = may,
    year = "2023",
    address = "Dubrovnik, Croatia",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/2304.01412",
    pages = "2799--2829",
}
""",
        n_samples={"dev": 1000, "test": 1011, "corpus": 5907},
        avg_character_length={"dev": 776.58, "test": 857.13, "corpus": 6806.97},
    )

    def load_data(self, **kwargs) -> None:
        """Populate ``corpus``/``queries``/``relevant_docs`` from the HF hub.

        No-op if the data was already loaded. Accepts an optional
        ``cache_dir`` keyword, forwarded to ``datasets.load_dataset``.
        """
        if self.data_loaded:
            return
        self.corpus, self.queries, self.relevant_docs = _load_statcan_data(
            path=self.metadata_dict["dataset"]["path"],
            langs=list(_LANGS.keys()),
            splits=self.metadata_dict["eval_splits"],
            cache_dir=kwargs.get("cache_dir", None),
            revision=self.metadata_dict["dataset"]["revision"],
        )
        # Guard flag so repeated calls skip the (slow) download/parse step.
        self.data_loaded = True