Datasets:

wzkariampuzha committed on
Commit
adb4b39
·
1 Parent(s): f042eb0

Create EpiClassify4GARD.py

Browse files
Files changed (1) hide show
  1. EpiClassify4GARD.py +131 -0
EpiClassify4GARD.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+
18
+ import csv
19
+ import os
20
+ import textwrap
21
+ import datasets
22
+ from datasets.tasks import TextClassification
23
+
24
+ _CITATION = """
25
+ John, J. N., Sid, E., & Zhu, Q. (2021). Recurrent Neural Networks to Automatically Identify Rare Disease Epidemiologic Studies from PubMed. AMIA Joint Summits on Translational Science proceedings. AMIA Joint Summits on Translational Science, 2021, 325–334.
26
+ """
27
+
28
+ _DESCRIPTION = """\
29
+
30
+ [fix description]
31
+
32
+ Prepare positive dataset.ipynb: Generates orphanet_epi_mesh.csv, the final positive dataset (articles that are all epidemiology studies). First, PubMed IDs are extracted from a collection of epidemiology sources provided by Orphanet. The final positive set consists of the PubMed IDs that have epidemiology, incidence, or prevalence MeSH terms. The notebook includes code to optionally expand the dataset by including articles with epidemiology-related MeSH terms beyond those included in the Orphanet file, although this was shown to have worse performance.
33
+ Prepare negative dataset.ipynb: Generates negative_dataset.csv, the final negative dataset (articles that are not epidemiology studies). Using the EBI API, the top 5 PubMed search results for each of the 6,000+ rare diseases included in the GARD database are retrieved. Articles that have epidemiology MeSH terms or keywords in the abstract or that are also in the Orphanet file are removed.
34
+
35
+ negative_dataset.csv: Negative dataset assembled by Prepare negative dataset.ipynb. Columns: PubMed ID, abstract text. 25,015 rows.
36
+ orphanet_epi_mesh.csv: Positive dataset assembled by Prepare positive dataset.ipynb. Columns: PubMed ID, abstract text. 1,145 rows.
37
+ """
38
+ _HOMEPAGE = "https://github.com/ncats/epi4GARD/tree/master#epi4gard"
39
+ _LICENSE = "https://raw.githubusercontent.com/ncats/epi4GARD/master/license.txt"
40
+
41
+ _URL = "https://huggingface.co/datasets/ncats/GARD_EpiSet4TextClassification/raw/main/"
42
+ _TRAINING_FILE = "epi_classify_train.tsv"
43
+ _VAL_FILE = "epi_classify_val.tsv"
44
+ _TEST_FILE = "epi_classify_test.tsv"
45
+
46
+ class EpiClassifyConfig(datasets.BuilderConfig):
47
+ """BuilderConfig for EpiClassify."""
48
+
49
+ def __init__(self, **kwargs):
50
+ """BuilderConfig for EpiClassify.
51
+
52
+ Args:
53
+ **kwargs: keyword arguments forwarded to super.
54
+ """
55
+ super(EpiClassifyConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
56
+
57
+ class EpiClassify(datasets.GeneratorBasedBuilder):
58
+ """The General Language Understanding Evaluation (GLUE) benchmark."""
59
+
60
+ BUILDER_CONFIGS = [
61
+ EpiClassifyConfig(
62
+ name="EpiClassify",
63
+ version=VERSION,
64
+ description=textwrap.dedent(
65
+ """\
66
+ The EpiClassify Dataset [REDO DESCRIPTION The task is to predict the sentiment of a
67
+ given sentence. We use the two-way (positive/negative) class split, and use only
68
+ sentence-level labels.]"""
69
+ ),
70
+ text_features={"abstract": "abstract"},
71
+ label_classes=["negative", "positive"],
72
+ label_column="label",
73
+ #data_url="https://dl.fbaipublicfiles.com/glue/data/SST-2.zip",
74
+ #data_dir="SST-2",
75
+ )
76
+ ]
77
+
78
+ def _info(self):
79
+ #features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
80
+
81
+ features = datasets.Features(
82
+ {
83
+ "text": datasets.Value("string"),
84
+ "label": datasets.features.ClassLabel(
85
+ names=[
86
+ "1 = Epi Abstract",
87
+ "2 = Not Epi Abstract",
88
+ ]
89
+ ),
90
+ }
91
+ )
92
+
93
+ '''
94
+ if self.config.label_classes:
95
+ features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
96
+ else:
97
+ features["label"] = datasets.Value("float32")
98
+ features["idx"] = datasets.Value("int32")
99
+ '''
100
+
101
+ return datasets.DatasetInfo(
102
+ description=_DESCRIPTION,
103
+ features=datasets.Features(features),
104
+ task_templates=[TextClassification(text_column="text", label_column="label")],
105
+ )
106
+
107
+ def _split_generators(self, dl_manager):
108
+ """Returns SplitGenerators."""
109
+ urls_to_download = {
110
+ "train": f"{_URL}{_TRAINING_FILE}",
111
+ "val": f"{_URL}{_VAL_FILE}",
112
+ "test": f"{_URL}{_TEST_FILE}",
113
+ }
114
+ downloaded_files = dl_manager.download_and_extract(urls_to_download)
115
+
116
+ return [
117
+ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
118
+ datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
119
+ datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
120
+ ]
121
+
122
+ def _generate_examples(self, filepath, split):
123
+ """Yields examples."""
124
+
125
+ with open(filepath, encoding="utf-8") as f:
126
+ data = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONNUMERIC)
127
+ for id_, row in enumerate(data):
128
+ yield id_, {
129
+ "text": row[0],
130
+ "label": row[1],
131
+ }