Datasets:

Modalities:
Text
Languages:
English
Size:
< 1K
Libraries:
Datasets
License:
gabrielaltay committed on
Commit
f0f0969
·
1 Parent(s): caa2e2d

upload hubscripts/bionlp_st_2013_gro_hub.py to hub from bigbio repo

Browse files
Files changed (1) hide show
  1. bionlp_st_2013_gro.py +240 -0
bionlp_st_2013_gro.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
from pathlib import Path
from typing import List

import datasets

from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks
from .bigbiohub import brat_parse_to_bigbio_kb
from .bigbiohub import kb_features
from .bigbiohub import parse_brat_file
25
+
26
# Canonical BigBio identifiers for this corpus.
_DATASETNAME = "bionlp_st_2013_gro"
_DISPLAYNAME = "BioNLP 2013 GRO"

# Names of the two dataset "views": the raw brat-derived layout and the
# unified BigBio KB layout.
_SOURCE_VIEW_NAME = "source"
_UNIFIED_VIEW_NAME = "bigbio"

_LANGUAGES = ['English']
# NOTE(review): flags consumed by BigBio tooling — _PUBMED presumably marks
# PubMed-derived text and _LOCAL marks corpora needing manual download;
# confirm against bigbiohub.
_PUBMED = True
_LOCAL = False

# BibTeX entry for the shared-task paper describing this corpus.
_CITATION = """\
@inproceedings{kim-etal-2013-gro,
    title = "{GRO} Task: Populating the Gene Regulation Ontology with events and relations",
    author = "Kim, Jung-jae  and
      Han, Xu  and
      Lee, Vivian  and
      Rebholz-Schuhmann, Dietrich",
    booktitle = "Proceedings of the {B}io{NLP} Shared Task 2013 Workshop",
    month = aug,
    year = "2013",
    address = "Sofia, Bulgaria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W13-2007",
    pages = "50--57",
}
"""

_DESCRIPTION = """\
GRO Task: Populating the Gene Regulation Ontology with events and
relations. A data set from the bio NLP shared tasks competition from 2013
"""

_HOMEPAGE = "https://github.com/openbiocorpora/bionlp-st-2013-gro"

_LICENSE = 'GENIA Project License for Annotated Corpora'

# Both configs download the same upstream archive; the split between
# "source" and "bigbio_kb" happens at parse time, not download time.
_URLs = {
    "source": "https://github.com/openbiocorpora/bionlp-st-2013-gro/archive/refs/heads/master.zip",
    "bigbio_kb": "https://github.com/openbiocorpora/bionlp-st-2013-gro/archive/refs/heads/master.zip",
}

_SUPPORTED_TASKS = [
    Tasks.EVENT_EXTRACTION,
    Tasks.NAMED_ENTITY_RECOGNITION,
    Tasks.RELATION_EXTRACTION,
]
_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"
73
+
74
+
75
class bionlp_st_2013_gro(datasets.GeneratorBasedBuilder):
    """GRO Task: Populating the Gene Regulation Ontology with events and
    relations. A data set from the bio NLP shared tasks competition from 2013"""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    # Two configs over the same upstream archive: the brat-derived
    # "source" schema and the harmonized BigBio KB schema.
    BUILDER_CONFIGS = [
        BigBioConfig(
            name="bionlp_st_2013_gro_source",
            version=SOURCE_VERSION,
            description="bionlp_st_2013_gro source schema",
            schema="source",
            subset_id="bionlp_st_2013_gro",
        ),
        BigBioConfig(
            name="bionlp_st_2013_gro_bigbio_kb",
            version=BIGBIO_VERSION,
            description="bionlp_st_2013_gro BigBio schema",
            schema="bigbio_kb",
            subset_id="bionlp_st_2013_gro",
        ),
    ]

    DEFAULT_CONFIG_NAME = "bionlp_st_2013_gro_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata.

        `features` defines the schema of the parsed data set. The schema
        depends on the chosen `config`: if it is `_SOURCE_VIEW_NAME` the
        schema mirrors the original brat stand-off annotation format; if
        `config` is `_UNIFIED_VIEW_NAME`, the schema is the canonical
        KB-task schema defined in `biomedical/schemas/kb.py`.
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    # T lines in brat: entity mentions / event triggers.
                    "text_bound_annotations": [
                        {
                            "offsets": datasets.Sequence([datasets.Value("int32")]),
                            "text": datasets.Sequence(datasets.Value("string")),
                            "type": datasets.Value("string"),
                            "id": datasets.Value("string"),
                        }
                    ],
                    # E lines in brat: events anchored to a trigger.
                    "events": [
                        {
                            # id of the text_bound_annotation acting as trigger
                            "trigger": datasets.Value("string"),
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "arguments": datasets.Sequence(
                                {
                                    "role": datasets.Value("string"),
                                    "ref_id": datasets.Value("string"),
                                }
                            ),
                        }
                    ],
                    # R lines in brat: binary relations.
                    "relations": [
                        {
                            "id": datasets.Value("string"),
                            "head": {
                                "ref_id": datasets.Value("string"),
                                "role": datasets.Value("string"),
                            },
                            "tail": {
                                "ref_id": datasets.Value("string"),
                                "role": datasets.Value("string"),
                            },
                            "type": datasets.Value("string"),
                        }
                    ],
                    # Equiv lines in brat: sets of equivalent annotations.
                    "equivalences": [
                        {
                            "id": datasets.Value("string"),
                            "ref_ids": datasets.Sequence(datasets.Value("string")),
                        }
                    ],
                    # M or A lines in brat: attributes / modifications.
                    "attributes": [
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "ref_id": datasets.Value("string"),
                            "value": datasets.Value("string"),
                        }
                    ],
                    # N lines in brat: normalizations to external resources.
                    "normalizations": [
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "ref_id": datasets.Value("string"),
                            # Name of the resource, e.g. "Wikipedia"
                            "resource_name": datasets.Value("string"),
                            # ID in the resource, e.g. 534366
                            "cuid": datasets.Value("string"),
                            # Human readable description/name of the entity,
                            # e.g. "Barack Obama"
                            "text": datasets.Value("string"),
                        }
                    ],
                },
            )
        elif self.config.schema == "bigbio_kb":
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Download and extract the corpus, mapping each split to its
        brat directory inside the archive."""
        my_urls = _URLs[self.config.schema]
        data_dir = Path(dl_manager.download_and_extract(my_urls))
        # Hoist the shared prefix; the original repeated it with
        # pointless placeholder-free f-strings.
        base = data_dir / "bionlp-st-2013-gro-master" / "original-data"
        data_files = {
            "train": base / "train",
            "dev": base / "devel",
            "test": base / "test",
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_files": data_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_files": data_files["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_files": data_files["test"]},
            ),
        ]

    def _generate_examples(self, data_files: Path):
        """Yield (key, example) pairs parsed from the brat files in
        *data_files*, in the schema selected by the active config.

        Bug fix: the original called ``parsing.parse_brat_file`` and
        ``parsing.brat_parse_to_bigbio_kb``, but no ``parsing`` module is
        imported or defined anywhere in this file, so every call raised
        ``NameError``. The helpers live in ``.bigbiohub`` (imported at
        the top of the file), so they are called directly here.
        """
        if self.config.schema == "source":
            txt_files = list(data_files.glob("*txt"))
            for guid, txt_file in enumerate(txt_files):
                example = parse_brat_file(txt_file)
                example["id"] = str(guid)
                yield guid, example
        elif self.config.schema == "bigbio_kb":
            txt_files = list(data_files.glob("*txt"))
            for guid, txt_file in enumerate(txt_files):
                example = brat_parse_to_bigbio_kb(parse_brat_file(txt_file))
                example["id"] = str(guid)
                yield guid, example
        else:
            raise ValueError(f"Invalid config: {self.config.name}")