from __future__ import annotations

import datasets
from mteb.abstasks.AbsTaskRetrieval import AbsTaskRetrieval
from mteb.abstasks.TaskMetadata import TaskMetadata


class SlovakSumRetrieval(AbsTaskRetrieval):
    metadata = TaskMetadata(
        name="SlovakSumRetrieval",
        description="""
            SlovakSum is a Slovak news summarization dataset consisting of over 200 thousand
            news articles with titles and short abstracts obtained from multiple Slovak newspapers.
            Originally intended as a summarization task, but since no human relevance annotations
            are provided, it is reformulated here as a retrieval task.
        """,
        reference="https://huggingface.co/datasets/NaiveNeuron/slovaksum",
        dataset={
            "path": "NaiveNeuron/slovaksum",
            "revision": "85d6b32f2762313714618171b9d1a65eb7408835",
        },
        type="Retrieval",
        category="s2s",
        eval_splits=["test"],
        eval_langs=["slk-Latn"],
        main_score="ndcg_at_10",
        date=("2015-04-26", "2022-01-11"),
        form=["written"],
        domains=["News", "Social", "Web"],
        task_subtypes=["Article retrieval"],
        license="openrail",
        socioeconomic_status="mixed",
        annotations_creators="derived",
        dialect=[],
        text_creation="found",
        bibtex_citation="""
@inproceedings{OndrejowaSlovakSum24,
  title = {SlovakSum: A Large Scale Slovak Summarization Dataset},
  booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation},
  author = {Ondrejová, Viktória and Šuppa, Marek},
  date = {2024},
}
        """,
        n_samples={"test": 600},
        avg_character_length={"test": 238.44},
    )

    def load_data(self, **kwargs):
        if self.data_loaded:
            return

        self.corpus, self.queries, self.relevant_docs = {}, {}, {}
        dataset_path = self.metadata_dict["dataset"]["path"]
        n_sample = self.metadata_dict["n_samples"]["test"]
        for split in kwargs.get("eval_splits", self.metadata_dict["eval_splits"]):
            split_ds = datasets.load_dataset(
                dataset_path, split=f"{split}[:{n_sample}]"
            )
            # Reformulate the summarization pairs as a retrieval task:
            # each short abstract ("sum") becomes a query, each article
            # (title + body text) becomes a document, and query i is
            # relevant only to document i.
            queries = {f"q{e+1}": x["sum"] for e, x in enumerate(split_ds)}
            corpus = {
                f"d{e+1}": {"title": x["title"], "text": x["text"]}
                for e, x in enumerate(split_ds)
            }
            qrels = {f"q{i+1}": {f"d{i+1}": 1} for i in range(split_ds.shape[0])}
            self.corpus[split], self.queries[split], self.relevant_docs[split] = (
                corpus,
                queries,
                qrels,
            )

        self.data_loaded = True
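

# Minimal usage sketch (not part of the task definition above): it assumes the
# standard mteb MTEB runner and a sentence-transformers model are installed.
# The model checkpoint below is only an illustrative placeholder, not one
# prescribed by the task.
if __name__ == "__main__":
    from mteb import MTEB
    from sentence_transformers import SentenceTransformer

    model = SentenceTransformer(
        "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
    )
    evaluation = MTEB(tasks=[SlovakSumRetrieval()])
    evaluation.run(model, output_folder="results")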