# FEA-Bench / testbed / embeddings-benchmark__mteb / mteb / tasks / Classification / multilingual / LanguageClassification.py
from __future__ import annotations

from mteb.abstasks.TaskMetadata import TaskMetadata

from ....abstasks import AbsTaskClassification
| _LANGUAGES = { | |
| "ar": ["ara-Arab"], | |
| "bg": ["bul-Cyrl"], | |
| "de": ["deu-Latn"], | |
| "el": ["ell-Grek"], | |
| "en": ["eng-Latn"], | |
| "es": ["spa-Latn"], | |
| "fr": ["fra-Latn"], | |
| "hi": ["hin-Deva"], | |
| "it": ["ita-Latn"], | |
| "ja": ["jpn-Jpan"], | |
| "nl": ["nld-Latn"], | |
| "pl": ["pol-Latn"], | |
| "pt": ["por-Latn"], | |
| "ru": ["rus-Cyrl"], | |
| "sw": ["swa-Latn"], | |
| "th": ["tha-Thai"], | |
| "tr": ["tur-Latn"], | |
| "ur": ["urd-Arab"], | |
| "vi": ["vie-Latn"], | |
| "zh": ["cmn-Hans"], | |
| } | |
class LanguageClassification(AbsTaskClassification):
    """Language-identification task over the papluca/language-identification dataset.

    Each example is a short text to be classified into one of the 20 languages
    listed in ``_LANGUAGES``; the main score is accuracy on the test split.
    """

    metadata = TaskMetadata(
        name="LanguageClassification",
        dataset={
            "path": "papluca/language-identification",
            "revision": "aa56583bf2bc52b0565770607d6fc3faebecf9e2",
        },
        description="A language identification dataset for 20 languages.",
        reference="https://huggingface.co/datasets/papluca/language-identification",
        category="s2s",
        type="Classification",
        eval_splits=["test"],
        eval_langs=_LANGUAGES,
        main_score="accuracy",
        date=("2021-11-01", "2021-11-30"),
        form=["written"],
        domains=["Reviews", "Web", "Non-fiction", "Fiction", "Government"],
        task_subtypes=["Language identification"],
        license="Not specified",
        socioeconomic_status="mixed",
        annotations_creators="derived",
        dialect=[],
        text_creation="found",
        bibtex_citation="",
        n_samples={"test": 2048},
        avg_character_length={"test": 107.8},
    )

    def dataset_transform(self) -> None:
        """Normalize the raw HF dataset into the shape the evaluator expects."""
        # The upstream dataset stores targets in a "labels" column, while
        # AbsTaskClassification consumes a column named "label".
        self.dataset = self.dataset.rename_columns({"labels": "label"})
        # Subsample the test split with stratification (seeded for
        # reproducibility) to keep evaluation cost bounded.
        self.dataset = self.stratified_subsampling(
            self.dataset, seed=self.seed, splits=["test"]
        )