ayuhamaro committed
Commit 10c8af0 · 1 Parent(s): 2336223

Create ws-pos-model-tune.py

Files changed (1)
  1. ws-pos-model-tune.py +167 -0
ws-pos-model-tune.py ADDED
@@ -0,0 +1,167 @@
+ # coding=utf-8
+ # Copyright 2020 HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ import datasets
+
+
+ _DESCRIPTION = ""
+ _HOMEPAGE_URL = ""
+ _CITATION = None
+ _TRAIN_URL = "https://huggingface.co/datasets/ayuhamaro/ws-pos-model-tune/raw/main/train"
+
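+ # NOTE (assumption, not documented in this commit): the parser in
+ # _generate_examples below implies the train file is CoNLL-style, with
+ # one token<TAB>tag pair per line and a blank line between sentences,
+ # e.g. (hypothetical content):
+ #
+ #     今<TAB>B
+ #     天<TAB>I
+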
26
+ class NlpModelTune(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     # Word-segmentation tags (BI scheme).
+                     "ws_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "B",
+                                 "I",
+                             ]
+                         )
+                     ),
+                     # Part-of-speech tags (CKIP tagset).
+                     "pos_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "A",
+                                 "Caa",
+                                 "Cab",
+                                 "Cba",
+                                 "Cbb",
+                                 "D",
+                                 "Da",
+                                 "Dfa",
+                                 "Dfb",
+                                 "Di",
+                                 "Dk",
+                                 "DM",
+                                 "I",
+                                 "Na",
+                                 "Nb",
+                                 "Nc",
+                                 "Ncd",
+                                 "Nd",
+                                 "Nep",
+                                 "Neqa",
+                                 "Neqb",
+                                 "Nes",
+                                 "Neu",
+                                 "Nf",
+                                 "Ng",
+                                 "Nh",
+                                 "Nv",
+                                 "P",
+                                 "T",
+                                 "VA",
+                                 "VAC",
+                                 "VB",
+                                 "VC",
+                                 "VCL",
+                                 "VD",
+                                 "VF",
+                                 "VE",
+                                 "VG",
+                                 "VH",
+                                 "VHC",
+                                 "VI",
+                                 "VJ",
+                                 "VK",
+                                 "VL",
+                                 "V_2",
+                                 "DE",
+                                 "SHI",
+                                 "FW",
+                                 "COLONCATEGORY",
+                                 "COMMACATEGORY",
+                                 "DASHCATEGORY",
+                                 "DOTCATEGORY",
+                                 "ETCCATEGORY",
+                                 "EXCLAMATIONCATEGORY",
+                                 "PARENTHESISCATEGORY",
+                                 "PAUSECATEGORY",
+                                 "PERIODCATEGORY",
+                                 "QUESTIONCATEGORY",
+                                 "SEMICOLONCATEGORY",
+                                 "SPCHANGECATEGORY",
+                             ]
+                         )
+                     ),
+                 },
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE_URL,
+             citation=_CITATION,
+         )
+
117
+     def _split_generators(self, dl_manager):
+         # Download the raw training file from the Hub and hand its local
+         # path to the example generator.
+         train_path = dl_manager.download_and_extract(_TRAIN_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"data_path": train_path},
+             )
+         ]
+
126
+     def _generate_examples(self, data_path):
+         # Stream the file, accumulating token/label pairs until a
+         # non-pair line (typically a blank line) closes a sentence.
+         sentence_counter = 0
+         with open(data_path, encoding="utf-8") as f:
+             current_words = []
+             current_labels = []
+             for row in f:
+                 row = row.rstrip()
+                 row_split = row.split("\t")
+                 if len(row_split) == 2:
+                     token, label = row_split
+                     current_words.append(token)
+                     current_labels.append(label)
+                 else:
+                     if not current_words:
+                         continue
+                     assert len(current_words) == len(current_labels), "word count does not match label count"
+                     # As written, ws_tags and pos_tags are both filled from
+                     # the same label column.
+                     sentence = (
+                         sentence_counter,
+                         {
+                             "id": str(sentence_counter),
+                             "tokens": current_words,
+                             "ws_tags": current_labels,
+                             "pos_tags": current_labels,
+                         },
+                     )
+                     sentence_counter += 1
+                     current_words = []
+                     current_labels = []
+                     yield sentence
+
+         # If the file does not end with a blank line, emit the last sentence.
+         if current_words:
+             sentence = (
+                 sentence_counter,
+                 {
+                     "id": str(sentence_counter),
+                     "tokens": current_words,
+                     "ws_tags": current_labels,
+                     "pos_tags": current_labels,
+                 },
+             )
+             yield sentence
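
As a quick sanity check, the script can be exercised with the datasets library. The snippet below is a minimal sketch, assuming the script is hosted in the ayuhamaro/ws-pos-model-tune dataset repo on the Hub (matching _TRAIN_URL above) so that load_dataset can resolve it by repo id:

from datasets import load_dataset

# Load the TRAIN split defined by this script (the repo id is an
# assumption, inferred from _TRAIN_URL).
ds = load_dataset("ayuhamaro/ws-pos-model-tune", split="train")

example = ds[0]
print(example["id"], example["tokens"])

# ClassLabel features store integer ids; int2str maps them back to the
# tag strings declared in _info.
ws_labels = ds.features["ws_tags"].feature
pos_labels = ds.features["pos_tags"].feature
print([ws_labels.int2str(i) for i in example["ws_tags"]])
print([pos_labels.int2str(i) for i in example["pos_tags"]])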