| """ NER dataset compiled by T-NER library https://github.com/asahi417/tner/tree/master/tner """ |
import json

import datasets


logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """[WikiAnn](https://aclanthology.org/P17-1178/)"""
_NAME = "wikiann"
_VERSION = "1.1.0"
_CITATION = """
@inproceedings{pan-etal-2017-cross,
    title = "Cross-lingual Name Tagging and Linking for 282 Languages",
    author = "Pan, Xiaoman and
      Zhang, Boliang and
      May, Jonathan and
      Nothman, Joel and
      Knight, Kevin and
      Ji, Heng",
    booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2017",
    address = "Vancouver, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/P17-1178",
    doi = "10.18653/v1/P17-1178",
    pages = "1946--1958",
    abstract = "The ambitious goal of this work is to develop a cross-lingual name tagging and linking framework for 282 languages that exist in Wikipedia. Given a document in any of these languages, our framework is able to identify name mentions, assign a coarse-grained or fine-grained type to each mention, and link it to an English Knowledge Base (KB) if it is linkable. We achieve this goal by performing a series of new KB mining methods: generating {``}silver-standard{''} annotations by transferring annotations from English to other languages through cross-lingual links and KB properties, refining annotations through self-training and topic selection, deriving language-specific morphology features from anchor links, and mining word translation pairs from cross-lingual links. Both name tagging and linking results for 282 languages are promising on Wikipedia data and on non-Wikipedia data.",
}
"""


_HOME_PAGE = "https://github.com/asahi417/tner"
_URL = f'https://huggingface.co/datasets/tner/{_NAME}/resolve/main/dataset'
_LANGUAGE = ["ace", "bg", "da", "fur", "ilo", "lij", "mzn", "qu", "su", "vi", "af", "bh", "de", "fy", "io", "lmo", "nap",
             "rm", "sv", "vls", "als", "bn", "diq", "ga", "is", "ln", "nds", "ro", "sw", "vo", "am", "bo", "dv", "gan", "it",
             "lt", "ne", "ru", "szl", "wa", "an", "br", "el", "gd", "ja", "lv", "nl", "rw", "ta", "war", "ang", "bs", "eml",
             "gl", "jbo", "map-bms", "nn", "sa", "te", "wuu", "ar", "ca", "en", "gn", "jv", "mg", "no", "sah", "tg", "xmf",
             "arc", "cbk-zam", "eo", "gu", "ka", "mhr", "nov", "scn", "th", "yi", "arz", "cdo", "es", "hak", "kk", "mi",
             "oc", "sco", "tk", "yo", "as", "ce", "et", "he", "km", "min", "or", "sd", "tl", "zea", "ast", "ceb", "eu", "hi",
             "kn", "mk", "os", "sh", "tr", "zh-classical", "ay", "ckb", "ext", "hr", "ko", "ml", "pa", "si", "tt",
             "zh-min-nan", "az", "co", "fa", "hsb", "ksh", "mn", "pdc", "simple", "ug", "zh-yue", "ba", "crh", "fi", "hu",
             "ku", "mr", "pl", "sk", "uk", "zh", "bar", "cs", "fiu-vro", "hy", "ky", "ms", "pms", "sl", "ur", "bat-smg",
             "csb", "fo", "ia", "la", "mt", "pnb", "so", "uz", "be-x-old", "cv", "fr", "id", "lb", "mwl", "ps", "sq", "vec",
             "be", "cy", "frr", "ig", "li", "my", "pt", "sr", "vep"]
_URLS = {
    l: {
        str(datasets.Split.TEST): [f'{_URL}/{l}/test.jsonl'],
        str(datasets.Split.TRAIN): [f'{_URL}/{l}/train.jsonl'],
        str(datasets.Split.VALIDATION): [f'{_URL}/{l}/dev.jsonl']
    } for l in _LANGUAGE
}
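# Each split file above is JSON Lines: one record per line, parsed verbatim by
# `_generate_examples` and expected to match the features declared in `_info`.
# A hypothetical record, for illustration only:
#
#     {"tokens": ["John", "lives", "in", "Berlin"], "tags": [2, 6, 6, 0]}
#
# where "tags" holds integer label IDs; the mapping from these IDs to entity-label
# names is not defined in this script.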


class WikiAnnConfig(datasets.BuilderConfig):
    """BuilderConfig for the WikiAnn dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for the WikiAnn dataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class WikiAnn(datasets.GeneratorBasedBuilder):
    """WikiAnn NER dataset compiled by T-NER."""

    BUILDER_CONFIGS = [
        WikiAnnConfig(name=l, version=datasets.Version(_VERSION), description=f"{_DESCRIPTION} (language: {l})")
        for l in _LANGUAGE
    ]

    def _split_generators(self, dl_manager):
        # Download the per-split JSONL files for the selected language and pass the
        # local paths on to `_generate_examples`.
        downloaded_file = dl_manager.download_and_extract(_URLS[self.config.name])
        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]

    def _generate_examples(self, filepaths):
        _key = 0
        for filepath in filepaths:
            logger.info(f"generating examples from = {filepath}")
            with open(filepath, encoding="utf-8") as f:
                # Each non-empty line is a single JSON record with "tokens" and "tags".
                _list = [i for i in f.read().split('\n') if len(i) > 0]
                for i in _list:
                    data = json.loads(i)
                    yield _key, data
                    _key += 1

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # Tags are exposed as raw integer label IDs rather than ClassLabel names.
                    "tags": datasets.Sequence(datasets.Value("int32")),
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )
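

if __name__ == "__main__":
    # Minimal smoke test (an illustrative sketch, not part of the original loader).
    # It assumes a recent `datasets` release in which DatasetBuilder accepts the
    # `config_name=` keyword, and that the JSONL files under _URL are reachable.
    builder = WikiAnn(config_name="en")
    builder.download_and_prepare()
    train_set = builder.as_dataset(split=datasets.Split.TRAIN)
    print(train_set[0])  # expected shape: {"tokens": [...], "tags": [...]}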