| """ NER dataset compiled by T-NER library https://github.com/asahi417/tner/tree/master/tner """ |
import json

import datasets
|
|
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """WikiNeural: a multilingual silver-standard NER corpus built from Wikipedia texts by combining knowledge-based approaches and neural models ([wikineural](https://aclanthology.org/2021.findings-emnlp.215/))."""
_NAME = "wikineural"
_VERSION = "1.0.0"
_CITATION = r"""
@inproceedings{tedeschi-etal-2021-wikineural-combined,
    title = "{W}iki{NE}u{R}al: {C}ombined Neural and Knowledge-based Silver Data Creation for Multilingual {NER}",
    author = "Tedeschi, Simone and
      Maiorca, Valentino and
      Campolungo, Niccol{\`o} and
      Cecconi, Francesco and
      Navigli, Roberto",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
    month = nov,
    year = "2021",
    address = "Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.findings-emnlp.215",
    doi = "10.18653/v1/2021.findings-emnlp.215",
    pages = "2521--2533",
    abstract = "Multilingual Named Entity Recognition (NER) is a key intermediate task which is needed in many areas of NLP. In this paper, we address the well-known issue of data scarcity in NER, especially relevant when moving to a multilingual scenario, and go beyond current approaches to the creation of multilingual silver data for the task. We exploit the texts of Wikipedia and introduce a new methodology based on the effective combination of knowledge-based approaches and neural models, together with a novel domain adaptation technique, to produce high-quality training corpora for NER. We evaluate our datasets extensively on standard benchmarks for NER, yielding substantial improvements up to 6 span-based F1-score points over previous state-of-the-art systems for data creation.",
}
"""
|
|
_HOME_PAGE = "https://github.com/asahi417/tner"
_URL = f'https://huggingface.co/datasets/tner/{_NAME}/resolve/main/dataset'
_LANGUAGE = ['de', 'en', 'es', 'fr', 'it', 'nl', 'pl', 'pt', 'ru']
_URLS = {
    l: {
        str(datasets.Split.TEST): [f'{_URL}/{l}/test.jsonl'],
        str(datasets.Split.TRAIN): [f'{_URL}/{l}/train.jsonl'],
        str(datasets.Split.VALIDATION): [f'{_URL}/{l}/dev.jsonl']
    } for l in _LANGUAGE
}
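# For illustration, `_URLS['en']` resolves to single-element URL lists per split
# (str(datasets.Split.TEST) is "test", and so on):
#   {"test": [f"{_URL}/en/test.jsonl"],
#    "train": [f"{_URL}/en/train.jsonl"],
#    "validation": [f"{_URL}/en/dev.jsonl"]}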
|
|
class WikiNeuralConfig(datasets.BuilderConfig):
    """BuilderConfig for WikiNeural."""

    def __init__(self, **kwargs):
        """BuilderConfig for WikiNeural.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
|
|
class WikiNeural(datasets.GeneratorBasedBuilder):
    """WikiNeural NER dataset (one builder config per language)."""

    BUILDER_CONFIGS = [
        WikiNeuralConfig(name=l, version=datasets.Version(_VERSION), description=f"{_DESCRIPTION} (language: {l})")
        for l in _LANGUAGE
    ]
|
    def _split_generators(self, dl_manager):
        # `download_and_extract` mirrors the nested input structure, so the result
        # is a dict keyed by split name ("train"/"validation"/"test"), each mapping
        # to a list of local cache paths.
        downloaded_file = dl_manager.download_and_extract(_URLS[self.config.name])
        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]
|
    def _generate_examples(self, filepaths):
        _key = 0
        for filepath in filepaths:
            logger.info(f"generating examples from = {filepath}")
            with open(filepath, encoding="utf-8") as f:
                # Stream the file line by line; each non-empty line is one JSON record.
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    yield _key, json.loads(line)
                    _key += 1
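
    # Expected shape of one JSONL record (illustrative values; the integer tag
    # ids follow the label mapping used by T-NER, which is not defined in this
    # script):
    #   {"tokens": ["John", "lives", "in", "Berlin"], "tags": [1, 0, 0, 5]}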
|
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "tags": datasets.Sequence(datasets.Value("int32")),
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )
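

# Local smoke-test sketch (not part of the original T-NER script). It assumes a
# `datasets` version that still supports loading from a local script path.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset(__file__, "en")  # load the English config via this script
    for split_name, split in ds.items():
        print(split_name, split[0])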