andreyunic23 committed
Commit d122bf7 · verified · 1 Parent(s): a76b4a0

Upload 2 files

Files changed (2):
  1. dataset_infos.json +129 -0
  2. testaunic23.py +181 -0
dataset_infos.json ADDED
@@ -0,0 +1,129 @@
+ {
+   "testaunic23": {
+     "description": "test testaunic23",
+     "citation": "test testaunic23",
+     "homepage": "",
+     "license": "",
+     "features": {
+       "id": { "dtype": "string", "id": null, "_type": "Value" },
+       "tokens": {
+         "feature": { "dtype": "string", "id": null, "_type": "Value" },
+         "length": -1,
+         "id": null,
+         "_type": "Sequence"
+       },
+       "pos_tags": {
+         "feature": {
+           "num_classes": 47,
+           "names": [
+             "\"",
+             "''",
+             "#",
+             "$",
+             "(",
+             ")",
+             ",",
+             ".",
+             ":",
+             "``",
+             "CC",
+             "CD",
+             "DT",
+             "EX",
+             "FW",
+             "IN",
+             "JJ",
+             "JJR",
+             "JJS",
+             "LS",
+             "MD",
+             "NN",
+             "NNP",
+             "NNPS",
+             "NNS",
+             "NN|SYM",
+             "PDT",
+             "POS",
+             "PRP",
+             "PRP$",
+             "RB",
+             "RBR",
+             "RBS",
+             "RP",
+             "SYM",
+             "TO",
+             "UH",
+             "VB",
+             "VBD",
+             "VBG",
+             "VBN",
+             "VBP",
+             "VBZ",
+             "WDT",
+             "WP",
+             "WP$",
+             "WRB"
+           ],
+           "names_file": null,
+           "id": null,
+           "_type": "ClassLabel"
+         },
+         "length": -1,
+         "id": null,
+         "_type": "Sequence"
+       },
+       "ner_tags": {
+         "feature": {
+           "num_classes": 11,
+           "names": [
+             "O",
+             "B-ENTITY",
+             "I-ENTITY",
+             "B-SYSTEM",
+             "I-SYSTEM",
+             "B-DOCUMENT",
+             "I-DOCUMENT",
+             "B-ORG",
+             "I-ORG",
+             "B-LOC",
+             "I-LOC"
+           ],
+           "names_file": null,
+           "id": null,
+           "_type": "ClassLabel"
+         },
+         "length": -1,
+         "id": null,
+         "_type": "Sequence"
+       }
+     },
+     "post_processed": null,
+     "supervised_keys": null,
+     "builder_name": "testaunic23",
+     "config_name": "testaunic23",
+     "version": {
+       "version_str": "1.0.0",
+       "description": null,
+       "major": 1,
+       "minor": 0,
+       "patch": 0
+     },
+     "splits": {
+       "train": {
+         "name": "train",
+         "num_examples": 249,
+         "dataset_name": "testaunic23"
+       },
+       "validation": {
+         "name": "validation",
+         "num_examples": 32,
+         "dataset_name": "testaunic23"
+       },
+       "test": {
+         "name": "test",
+         "num_examples": 32,
+         "dataset_name": "testaunic23"
+       }
+     }
+   }
+ }
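
For reference, a minimal sketch of how this metadata surfaces at load time. The repo id andreyunic23/testaunic23 is an assumption inferred from the committer and config name, not something the commit confirms:

    from datasets import load_dataset

    # Hypothetical repo id; point this at wherever the dataset is actually hosted.
    ds = load_dataset("andreyunic23/testaunic23")

    # Split sizes should line up with the "splits" block above: 249/32/32.
    print({split: ds[split].num_rows for split in ds})

    # ner_tags is a Sequence of ClassLabel; the label set above is recoverable via:
    print(ds["train"].features["ner_tags"].feature.names)
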
testaunic23.py ADDED
@@ -0,0 +1,181 @@
+ # coding=utf-8
+ # Copyright 2020 HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """test testaunic23"""
+
+ import logging
+
+ import datasets
+
+
+ _CITATION = """\
+ test testaunic23
+ """
+
+ _DESCRIPTION = """\
+ test testaunic23
+ """
+
+ _URL = "https://github.com/andreyokamura-unicamp/test_dataset/"
+ _TRAINING_FILE = "train.txt"
+ _DEV_FILE = "dev.txt"
+ _TEST_FILE = "test.txt"
+
+
+ class testaunic23Config(datasets.BuilderConfig):
+     """BuilderConfig for testaunic23."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for testaunic23.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(testaunic23Config, self).__init__(**kwargs)
+
+
+ class testaunic23(datasets.GeneratorBasedBuilder):
+     """testaunic23 dataset."""
+
+     BUILDER_CONFIGS = [
+         testaunic23Config(name="testaunic23", version=datasets.Version("1.0.0"), description="testaunic23 dataset"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "document_id": datasets.Value("int32"),
+                     "sentence_id": datasets.Value("int32"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     # Penn Treebank POS tag set, mirroring dataset_infos.json;
+                     # _generate_examples yields pos_tags, so it must be declared here.
+                     "pos_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 '"', "''", "#", "$", "(", ")", ",", ".", ":", "``",
+                                 "CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR", "JJS", "LS",
+                                 "MD", "NN", "NNP", "NNPS", "NNS", "NN|SYM", "PDT", "POS", "PRP", "PRP$",
+                                 "RB", "RBR", "RBS", "RP", "SYM", "TO", "UH", "VB", "VBD", "VBG",
+                                 "VBN", "VBP", "VBZ", "WDT", "WP", "WP$", "WRB",
+                             ]
+                         )
+                     ),
+                     "chunk_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-ADJP",
+                                 "I-ADJP",
+                                 "B-ADVP",
+                                 "I-ADVP",
+                                 "B-CONJP",
+                                 "I-CONJP",
+                                 "B-INTJ",
+                                 "I-INTJ",
+                                 "B-LST",
+                                 "I-LST",
+                                 "B-NP",
+                                 "I-NP",
+                                 "B-PP",
+                                 "I-PP",
+                                 "B-PRT",
+                                 "I-PRT",
+                                 "B-SBAR",
+                                 "I-SBAR",
+                                 "B-UCP",
+                                 "I-UCP",
+                                 "B-VP",
+                                 "I-VP",
+                             ]
+                         )
+                     ),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-ENTITY",
+                                 "I-ENTITY",
+                                 "B-SYSTEM",
+                                 "I-SYSTEM",
+                                 "B-DOCUMENT",
+                                 "I-DOCUMENT",
+                                 "B-ORG",
+                                 "I-ORG",
+                                 "B-LOC",
+                                 "I-LOC",
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         urls_to_download = {
+             "train": f"{_URL}{_TRAINING_FILE}",
+             "dev": f"{_URL}{_DEV_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         logging.info("⏳ Generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             guid = 0
+             document_id = 0
+             sentence_id = 0
+             tokens = []
+             pos_tags = []
+             chunk_tags = []
+             ner_tags = []
+             for line in f:
+                 if line.startswith("-DOCSTART-") or line == "" or line == "\n":
+                     # Flush the pending sentence first, so it keeps the ids of
+                     # the document it belongs to.
+                     if tokens:
+                         yield guid, {
+                             "id": str(guid),
+                             "document_id": document_id,
+                             "sentence_id": sentence_id,
+                             "tokens": tokens,
+                             "pos_tags": pos_tags,
+                             "chunk_tags": chunk_tags,
+                             "ner_tags": ner_tags,
+                         }
+                         guid += 1
+                         sentence_id += 1
+                         tokens = []
+                         pos_tags = []
+                         chunk_tags = []
+                         ner_tags = []
+                     if line.startswith("-DOCSTART-"):
+                         document_id += 1
+                         sentence_id = 0
+                 else:
+                     # conll2003-style rows are space separated: token POS chunk NER
+                     splits = line.split(" ")
+                     tokens.append(splits[0])
+                     pos_tags.append(splits[1])
+                     chunk_tags.append(splits[2])
+                     ner_tags.append(splits[3].rstrip())
+             # last example
+             if tokens:
+                 yield guid, {
+                     "id": str(guid),
+                     "document_id": document_id,
+                     "sentence_id": sentence_id,
+                     "tokens": tokens,
+                     "pos_tags": pos_tags,
+                     "chunk_tags": chunk_tags,
+                     "ner_tags": ner_tags,
+                 }
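
The parser above expects conll2003-formatted text, which this commit does not include. A hypothetical fragment of train.txt under that assumption, with one token per line in token/POS/chunk/NER column order, blank lines separating sentences, and -DOCSTART- lines separating documents:

    -DOCSTART- -X- -X- O

    The DT B-NP O
    invoice NN I-NP B-DOCUMENT
    reached VBD B-VP O
    SAP NNP B-NP B-SYSTEM
    . . O O

One caveat: _URL points at the GitHub repository page rather than a raw-file path, so download_and_extract would likely fetch HTML instead of the .txt files; a raw.githubusercontent.com URL (or hosting train.txt/dev.txt/test.txt alongside the script) would be needed for the loader to actually run.
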