File size: 2,561 Bytes
83d24b2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
from __future__ import annotations

from collections import Counter

import datasets
from datasets import DatasetDict

from mteb.abstasks import AbsTaskClassification, MultilingualTask
from mteb.abstasks.TaskMetadata import TaskMetadata


class TurkicClassification(MultilingualTask, AbsTaskClassification):
    """News topic classification over three Turkic languages (Kyrgyz, Kazakh, Bashkir).

    Loads a single multilingual HF dataset and splits it into one per-language
    ``DatasetDict`` keyed by the language codes in ``eval_langs``.
    """

    metadata = TaskMetadata(
        name="TurkicClassification",
        description="A dataset of news classification in three Turkic languages.",
        dataset={
            "path": "Electrotubbie/classification_Turkic_languages",
            "revision": "db1a67c1bdd54fbb8536af026dc8596f00f9c41d",
        },
        reference="https://huggingface.co/datasets/Electrotubbie/classification_Turkic_languages/",
        type="Classification",
        category="s2s",
        eval_splits=["train"],
        eval_langs={
            "ky": ["kir-Cyrl"],
            "kk": ["kaz-Cyrl"],
            "ba": ["bak-Cyrl"],
        },
        main_score="accuracy",
        date=("2023-02-16", "2023-09-03"),
        form=["written"],
        domains=["News"],
        task_subtypes=["Topic classification"],
        license="Not specified",
        socioeconomic_status="low",
        annotations_creators="derived",
        dialect=[],
        text_creation="found",
        bibtex_citation="""
        """,
        n_samples={"train": 193056},
        avg_character_length={"train": 1103.13},
    )

    def transform_data(self, dataset, lang):
        """Return the stratified-subsampled train split restricted to *lang*.

        Rows are kept only when their label occurs at least 20 times.
        NOTE(review): label frequencies are counted over the FULL train split
        (all languages combined), not per language — confirm this is intended.
        """
        label_freq = Counter(dataset["train"]["label"])

        def _keep(row):
            # Row must belong to the requested language and carry a
            # sufficiently frequent label.
            return row["lang"] == lang and label_freq[row["label"]] >= 20

        per_lang = DatasetDict(train=dataset["train"].filter(_keep))
        per_lang = self.stratified_subsampling(
            per_lang, seed=self.seed, splits=["train"]
        )
        return per_lang["train"]

    def load_data(self, **kwargs):
        """Load the dataset from the HuggingFace hub and split it per language."""
        if self.data_loaded:
            return

        hub_config = self.metadata_dict.get("dataset", None)
        raw = datasets.load_dataset(**hub_config)
        # Normalize column names to the text/label pair the evaluator expects.
        raw = raw.rename_columns({"processed_text": "text", "category": "label"})

        self.dataset = {
            lang: DatasetDict(train=self.transform_data(raw, lang))
            for lang in self.langs
        }
        self.dataset_transform()
        self.data_loaded = True