import csv
import json

import datasets


_CITATION = """\
@misc{rybak2022improving,
      title={Improving Question Answering Performance through Manual Annotation: Costs, Benefits and Strategies},
      author={Piotr Rybak and Piotr Przybyła and Maciej Ogrodniczuk},
      year={2022},
      eprint={2212.08897},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
PolQA is the first Polish dataset for open-domain question answering (OpenQA). It consists of 7,000 questions, 87,525 manually labeled evidence passages, and a corpus of over 7 million candidate passages.
"""

_HOMEPAGE = ""

_LICENSE = ""
_FEATURES_PAIRS = datasets.Features(
    {
        "question_id": datasets.Value("int32"),
        "passage_title": datasets.Value("string"),
        "passage_text": datasets.Value("string"),
        "passage_wiki": datasets.Value("string"),
        "passage_id": datasets.Value("string"),
        "duplicate": datasets.Value("bool"),
        "question": datasets.Value("string"),
        "relevant": datasets.Value("bool"),
        "annotated_by": datasets.Value("string"),
        "answers": datasets.Value("string"),
        "question_formulation": datasets.Value("string"),
        "question_type": datasets.Value("string"),
        "entity_type": datasets.Value("string"),
        "entity_subtype": datasets.Value("string"),
        "split": datasets.Value("string"),
        "passage_source": datasets.Value("string"),
    }
)
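
# The "passages" configuration is the retrieval corpus itself: one Wikipedia
# passage per record, identified by `id` and `title`.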
_FEATURES_PASSAGES = datasets.Features(
    {
        "id": datasets.Value("string"),
        "title": datasets.Value("string"),
        "text": datasets.Value("string"),
    }
)


_URLS = {
    "pairs": {
        "train": ["data/train.csv"],
        "validation": ["data/valid.csv"],
        "test": ["data/test.csv"],
    },
    "passages": {
        "train": ["data/passages.jsonl"],
    },
}
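
# A minimal usage sketch, assuming this script is published as a Hugging Face
# dataset repository (the repository id below is a placeholder):
#
#   from datasets import load_dataset
#   pairs = load_dataset("<namespace>/polqa", "pairs")
#   passages = load_dataset("<namespace>/polqa", "passages", split="train")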

class PolQA(datasets.GeneratorBasedBuilder):
    """PolQA is the first Polish dataset for OpenQA. It consists of manually labeled QA pairs and a corpus of Wikipedia passages."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=name, version=datasets.Version("1.0.0"))
        for name in _URLS
    ]
    DEFAULT_CONFIG_NAME = "pairs"

    def _info(self):
        if self.config.name == "pairs":
            features = _FEATURES_PAIRS
        else:
            features = _FEATURES_PASSAGES

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
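
    # `download_and_extract` accepts the nested dict from _URLS and returns it
    # with the same structure, each relative path resolved to a local file.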
    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        if self.config.name == "pairs":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepaths": data_dir["train"],
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepaths": data_dir["validation"],
                        "split": "validation",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepaths": data_dir["test"],
                        "split": "test",
                    },
                ),
            ]
        else:
            # The passage corpus ships as a single train split.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepaths": data_dir["train"],
                        "split": "train",
                    },
                ),
            ]
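
    # CSV has no native boolean type, so the boolean columns (`duplicate` and
    # `relevant`) arrive as the strings "True"/"False" and are parsed explicitly.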
    @staticmethod
    def _parse_bool(text):
        if text == "True":
            return True
        elif text == "False":
            return False
        else:
            raise ValueError(f"Expected 'True' or 'False', got {text!r}")
    def _generate_examples(self, filepaths, split):
        if self.config.name == "pairs":
            # Identify which columns the schema declares as booleans.
            boolean_features = [name for name, val in _FEATURES_PAIRS.items() if val.dtype == "bool"]

            # Use a running counter rather than a per-file enumerate so example
            # keys stay unique when a split lists more than one file.
            key = 0
            for filepath in filepaths:
                with open(filepath, encoding="utf-8") as f:
                    for row in csv.DictReader(f):
                        for boolean_feature in boolean_features:
                            row[boolean_feature] = self._parse_bool(row[boolean_feature])
                        yield key, row
                        key += 1
        else:
            # The passage corpus is JSON Lines: one JSON object per line.
            key = 0
            for filepath in filepaths:
                with open(filepath, encoding="utf-8") as f:
                    for line in f:
                        yield key, json.loads(line)
                        key += 1