from __future__ import annotations
from mteb.abstasks import AbsTaskClassification
from mteb.abstasks.TaskMetadata import TaskMetadata
# Per-split cap used by stratified_subsampling below; applied to both the
# "validation" and "test" splits (also reported in metadata.n_samples).
TEST_SAMPLES = 2048
class PersianFoodSentimentClassification(AbsTaskClassification):
    """MTEB sentiment classification task over the Persian Food Review dataset.

    Evaluates on the "validation" and "test" splits, each subsampled to
    TEST_SAMPLES examples via stratified subsampling. Main score: accuracy.
    """

    metadata = TaskMetadata(
        name="PersianFoodSentimentClassification",
        description="Persian Food Review Dataset",
        reference="https://hooshvare.github.io/docs/datasets/sa",
        dataset={
            "path": "asparius/Persian-Food-Sentiment",
            # Pinned HF dataset revision for reproducibility.
            "revision": "92ba517dfd22f6334111ad84154d16a2890f5b1d",
        },
        type="Classification",
        category="s2s",
        eval_splits=["validation", "test"],
        eval_langs=["fas-Arab"],
        main_score="accuracy",
        date=("2020-01-01", "2020-05-31"),
        form=["written"],
        domains=["Reviews"],
        task_subtypes=["Sentiment/Hate speech"],
        license="Not specified",
        socioeconomic_status="mixed",
        annotations_creators="derived",
        dialect=[],
        text_creation="found",
        # Fixed: BibTeX requires authors to be separated by " and ", not
        # commas — a comma-separated list is parsed as one garbled name.
        bibtex_citation="""
@article{ParsBERT,
title={ParsBERT: Transformer-based Model for Persian Language Understanding},
author={Mehrdad Farahani and Mohammad Gharachorloo and Marzieh Farahani and Mohammad Manthouri},
journal={ArXiv},
year={2020},
volume={abs/2005.12515}
}
""",
        n_samples={"validation": TEST_SAMPLES, "test": TEST_SAMPLES},
        avg_character_length={"validation": 90.37, "test": 90.58},
    )

    def dataset_transform(self):
        """Subsample each evaluation split to TEST_SAMPLES items, stratified by label."""
        self.dataset = self.stratified_subsampling(
            self.dataset, seed=self.seed, splits=["validation", "test"]
        )
|