from __future__ import annotations
import itertools
import numpy as np
from datasets import Dataset, DatasetDict
from mteb.abstasks import AbsTaskClustering, MultilingualTask
from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast
from mteb.abstasks.TaskMetadata import TaskMetadata
# Maps each dataset config name (Wikipedia language subdomain, e.g. "lv" for
# lv.wikipedia.org) to its mteb language code(s) in "ISO 639-3 + ISO 15924
# script" form. Key order determines subset iteration order — do not reorder.
_LANGUAGES = {
    "bs": ["bos-Latn"],
    "ca": ["cat-Latn"],
    "cs": ["ces-Latn"],
    "da": ["dan-Latn"],
    "eu": ["eus-Latn"],
    "gv": ["glv-Latn"],
    "ilo": ["ilo-Latn"],
    "ku": ["kur-Latn"],
    "lv": ["lav-Latn"],
    "min": ["min-Latn"],
    "mt": ["mlt-Latn"],
    "sco": ["sco-Latn"],
    "sq": ["sqi-Latn"],
    "wa": ["wln-Latn"],
}
class WikiClusteringP2P(AbsTaskClustering, MultilingualTask):
    """Multilingual clustering of Wikipedia paragraphs (legacy slow variant).

    Superseded by ``WikiClusteringFastP2P``; kept so previously reported
    scores remain reproducible.
    """

    # NOTE: "superseeded_by" (sic) is the attribute name the mteb framework
    # reads — keep the historic misspelling; renaming it would break the
    # supersession link. TODO confirm against the installed mteb version.
    superseeded_by = "WikiClusteringFastP2P"
    metadata = TaskMetadata(
        name="WikiClusteringP2P",
        # Typo fixed: the task this is inspired by is "BlurbsClusteringP2P".
        description="Clustering of wikipedia articles inspired by BlurbsClusteringP2P. Labels are taken from top-level categories of the respective languages (e.g., https://lv.wikipedia.org/wiki/Kategorija:Pamatkategorijas).",
        reference="https://github.com/Rysias/wiki-clustering",
        dataset={
            "path": "ryzzlestrizzle/multi-wiki-clustering-p2p",
            "revision": "d4d92f8f28be71035be6a96bdfd4e200cf62faa8",
        },
        type="Clustering",
        category="p2p",
        eval_splits=["test"],
        eval_langs=_LANGUAGES,
        main_score="v_measure",
        date=("2001-01-15", "2024-04-15"),
        form=["written"],
        domains=["Encyclopaedic"],
        task_subtypes=["Thematic clustering"],
        license="cc-by-sa-3.0",
        socioeconomic_status="mixed",
        annotations_creators="derived",
        dialect=[],
        text_creation="created",
        bibtex_citation=None,  # no citation exists for this dataset
        n_samples={"test": 71680},
        avg_character_length={"test": 625.3},
    )
class WikiClusteringFastP2P(AbsTaskClusteringFast, MultilingualTask):
    """Multilingual clustering of Wikipedia paragraphs (fast variant).

    Identical data source to ``WikiClusteringP2P``, but each split is
    stratified-subsampled to 2048 examples for the fast clustering evaluation.
    """

    metadata = TaskMetadata(
        name="WikiClusteringFastP2P",
        # Typo fixed: the task this is inspired by is "BlurbsClusteringP2P".
        description="Clustering of wikipedia articles inspired by BlurbsClusteringP2P. Labels are taken from top-level categories of the respective languages (e.g., https://lv.wikipedia.org/wiki/Kategorija:Pamatkategorijas).",
        reference="https://github.com/Rysias/wiki-clustering",
        dataset={
            "path": "ryzzlestrizzle/multi-wiki-clustering-p2p",
            "revision": "d4d92f8f28be71035be6a96bdfd4e200cf62faa8",
        },
        type="Clustering",
        category="p2p",
        eval_splits=["test"],
        eval_langs=_LANGUAGES,
        main_score="v_measure",
        date=("2001-01-15", "2024-04-15"),
        form=["written"],
        domains=["Encyclopaedic"],
        task_subtypes=["Thematic clustering"],
        license="cc-by-sa-3.0",
        socioeconomic_status="mixed",
        annotations_creators="derived",
        dialect=[],
        text_creation="created",
        bibtex_citation="",  # no citation exists for this dataset
        n_samples={"test": 2048},
        avg_character_length={"test": 625.3},
    )

    def dataset_transform(self):
        """Flatten the raw clustering rows, drop singleton labels, subsample.

        For every language subset and eval split:
        1. Flatten the nested ``labels``/``sentences`` lists into flat,
           parallel example lists.
        2. Remove examples whose label occurs exactly once in the split —
           singleton clusters cannot contribute to a meaningful clustering
           score.
        3. Stratified-subsample each split to 2048 examples (seeded).
        """
        transformed = {}
        for lang in self.hf_subsets:
            per_split = {}
            for split in self.metadata.eval_splits:
                # Build the flat lists per split. (The previous version
                # accumulated across splits, so with more than one eval
                # split, later splits would have included earlier splits'
                # examples; it also left a dead `ds[lang] = dict()` write.)
                labels = list(
                    itertools.chain.from_iterable(
                        self.dataset[lang][split]["labels"]
                    )
                )
                sentences = list(
                    itertools.chain.from_iterable(
                        self.dataset[lang][split]["sentences"]
                    )
                )
                # Identify labels that appear exactly once in this split.
                unique_labels, counts = np.unique(labels, return_counts=True)
                solo_labels = unique_labels[counts == 1]
                is_solo = np.isin(labels, solo_labels)
                split_ds = Dataset.from_dict(
                    {"labels": labels, "sentences": sentences}
                )
                if is_solo.any():
                    # Keep only examples whose label is not a singleton.
                    split_ds = split_ds.select(np.nonzero(~is_solo)[0])
                per_split[split] = split_ds
            transformed[lang] = DatasetDict(per_split)
        self.dataset = DatasetDict(transformed)
        for lang in self.hf_subsets:
            self.dataset[lang] = self.stratified_subsampling(
                self.dataset[lang],
                self.seed,
                self.metadata.eval_splits,
                label="labels",
                n_samples=2048,
            )