| """Datasets loading script for wikitext_linked""" |
|
|
| import os |
|
|
| import datasets |
| import pyarrow as pa |
| import pyarrow.parquet as pq |
|
|
|
|
| logger = datasets.utils.logging.get_logger(__name__) |
|
|
|
|
_CITATION = """\
@misc{merity2016pointer,
    title={Pointer Sentinel Mixture Models},
    author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},
    year={2016},
    eprint={1609.07843},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}

@inproceedings{nguyen2021trankit,
    title={Trankit: A Light-Weight Transformer-based Toolkit for Multilingual Natural Language Processing},
    author={Nguyen, Minh Van and Lai, Viet Dac and Veyseh, Amir Pouran Ben and Nguyen, Thien Huu},
    booktitle={Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations},
    year={2021}
}

@misc{entity-fishing,
    title={entity-fishing},
    howpublished={\\url{https://github.com/kermitt2/entity-fishing}},
    publisher={GitHub},
    year={2016--2022},
    archivePrefix={swh},
    eprint={1:dir:cb0ba3379413db12b0018b7c3af8d0d2d864139c}
}
"""

_DESCRIPTION = """\
The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified
Good and Featured articles on Wikipedia. Dependency relations, POS tags, and NER tags are annotated with trankit, and
entities are linked with entity-fishing.
The dataset is available under the Creative Commons Attribution-ShareAlike License.
"""

_HOMEPAGE = "https://github.com/GabrielKP/svo/"

_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"

FEATURES = datasets.Features(
    {
        "text": datasets.Value("string"),
        "original_id": datasets.Value("int64"),
        "tok_span": datasets.Sequence(feature=datasets.Sequence(feature=datasets.Value("int64"))),
        "tok_upos": datasets.Sequence(feature=datasets.Value("string")),
        "tok_xpos": datasets.Sequence(feature=datasets.Value("string")),
        "tok_dephead": datasets.Sequence(feature=datasets.Value("int64")),
        "tok_deprel": datasets.Sequence(feature=datasets.Value("string")),
        "tok_lemma": datasets.Sequence(feature=datasets.Value("string")),
        "tok_ner": datasets.Sequence(feature=datasets.Value("string")),
        "ent_span": datasets.Sequence(feature=datasets.Sequence(feature=datasets.Value("int64"))),
        "ent_wikipedia_external_ref": datasets.Sequence(feature=datasets.Value("string")),
        "ent_ner": datasets.Sequence(feature=datasets.Value("string")),
        "ent_domains": datasets.Sequence(
            feature=datasets.Sequence(feature=datasets.Value("string"))
        ),
    }
)
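
# Shape sketch of one row under FEATURES (illustrative values only; the exact
# [start, end] span convention is an assumption, not stated in this script):
#
#   {
#       "text": "...",
#       "original_id": 0,
#       "tok_span": [[0, 3], [4, 9]],   # one [start, end] pair per token
#       "tok_upos": ["DET", "NOUN"],
#       "ent_span": [[4, 9]],           # one pair per linked entity
#       "ent_wikipedia_external_ref": ["..."],
#       ...
#   }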

_URL = "https://huggingface.co/datasets/gabrielkp/wikitext_linked/resolve/main/"


class WikitextLinked(datasets.ArrowBasedBuilder):
    """wikitext_linked is an annotated and entity-linked version of WikiText, a
    collection of over 100 million tokens extracted from the set of verified Good and
    Featured articles on Wikipedia.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="wikitext2",
            version=VERSION,
            description="The small version",
            data_dir="wikitext2",
        ),
        datasets.BuilderConfig(
            name="wikitext103",
            version=VERSION,
            description="The big version",
            data_dir="wikitext103",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            license=_LICENSE,
            features=FEATURES,
            version=self.VERSION,
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(f"{_URL}{self.config.data_dir}.zip")
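        # The archive extracts to a directory that contains
        # "<config.data_dir>/{train,validation,test}.parquet"; the joins below
        # follow that layout (inferred from the original paths, not verified
        # against the hosted zip).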
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, self.config.data_dir, "train.parquet"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, self.config.data_dir, "validation.parquet"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, self.config.data_dir, "test.parquet"),
                },
            ),
        ]

    def _generate_tables(self, filepath):
        schema = pa.schema(FEATURES.type)
        with open(filepath, "rb") as f:
            parquet_file = pq.ParquetFile(f)
            try:
                for batch_idx, record_batch in enumerate(
                    parquet_file.iter_batches(batch_size=10000, columns=None)
                ):
                    pa_table = pa.Table.from_batches([record_batch])
                    # Align column order (and types) with the declared FEATURES schema.
                    pa_table = pa.Table.from_arrays(
                        [pa_table[field.name] for field in schema], schema=schema
                    )
                    yield f"{batch_idx}", pa_table
            except ValueError as e:
                logger.error(f"Failed to read file '{filepath}' with error {type(e)}: {e}")
                raise
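
# A minimal standalone sketch of what _generate_tables does, useful for inspecting
# a downloaded split outside the datasets pipeline (the parquet path below is
# hypothetical):
#
#   import pyarrow.parquet as pq
#
#   pf = pq.ParquetFile("wikitext2/train.parquet")
#   for batch in pf.iter_batches(batch_size=10000):
#       print(batch.num_rows, batch.schema.names)
#       break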