Languages: English
gabrielaltay committed
Commit 6c68295 · 1 Parent(s): c4dfa83

upload hubscripts/ebm_pico_hub.py to hub from bigbio repo

Files changed (1)
  1. ebm_pico.py +338 -0
ebm_pico.py ADDED
@@ -0,0 +1,338 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ This corpus release contains 4,993 abstracts annotated with (P)articipants,
+ (I)nterventions, and (O)utcomes. Training labels are sourced from AMT workers and
+ aggregated to reduce noise. Test labels are collected from medical professionals.
+ """
+
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple, Union
+
+ import datasets
+
+ from .bigbiohub import BigBioConfig, Tasks, kb_features
+
+ _LANGUAGES = ["English"]
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @inproceedings{nye-etal-2018-corpus,
+     title = "A Corpus with Multi-Level Annotations of Patients, Interventions and Outcomes to Support Language Processing for Medical Literature",
+     author = "Nye, Benjamin and
+       Li, Junyi Jessy and
+       Patel, Roma and
+       Yang, Yinfei and
+       Marshall, Iain and
+       Nenkova, Ani and
+       Wallace, Byron",
+     booktitle = "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
+     month = jul,
+     year = "2018",
+     address = "Melbourne, Australia",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/P18-1019",
+     doi = "10.18653/v1/P18-1019",
+     pages = "197--207",
+ }
+ """
+
+ _DATASETNAME = "ebm_pico"
+ _DISPLAYNAME = "EBM NLP"
+
+ _DESCRIPTION = """\
+ This corpus release contains 4,993 abstracts annotated with (P)articipants,
+ (I)nterventions, and (O)utcomes. Training labels are sourced from AMT workers and
+ aggregated to reduce noise. Test labels are collected from medical professionals.
+ """
+
+ _HOMEPAGE = "https://github.com/bepnye/EBM-NLP"
+
+ _LICENSE = "License information unavailable"
+
+ _URLS = {
+     _DATASETNAME: "https://github.com/bepnye/EBM-NLP/raw/master/ebm_nlp_2_00.tar.gz"
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+
+ _SOURCE_VERSION = "2.0.0"
+ _BIGBIO_VERSION = "1.0.0"
+
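+ # The corpus is annotated in two phases: "starting_spans" marks binary
+ # Participant / Intervention / Outcome spans, and "hierarchical_labels"
+ # assigns each span a fine-grained subtype (e.g. Age, Sex, or Condition
+ # for participants).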
+ PHASES = ("starting_spans", "hierarchical_labels")
+ LABEL_DECODERS = {
+     PHASES[0]: {
+         "participants": {0: "No Label", 1: "Participant"},
+         "interventions": {0: "No Label", 1: "Intervention"},
+         "outcomes": {0: "No Label", 1: "Outcome"},
+     },
+     PHASES[1]: {
+         "participants": {
+             0: "No label",
+             1: "Age",
+             2: "Sex",
+             3: "Sample-size",
+             4: "Condition",
+         },
+         "interventions": {
+             0: "No label",
+             1: "Surgical",
+             2: "Physical",
+             3: "Pharmacological",
+             4: "Educational",
+             5: "Psychological",
+             6: "Other",
+             7: "Control",
+         },
+         "outcomes": {
+             0: "No label",
+             1: "Physical",
+             2: "Pain",
+             3: "Mortality",
+             4: "Adverse-effects",
+             5: "Mental",
+             6: "Other",
+         },
+     },
+ }
+
+
+ def _get_entities_pico(
+     annotation_dict: Dict[str, List[int]],
+     tokenized: List[str],
+     document_content: str,
+ ) -> List[Dict[str, Union[int, str]]]:
+     """Extract PIO entities from a document using annotation_dict."""
+
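+     # Example (hypothetical input): with tokenized = ["Adults", "with", "diabetes"]
+     # and annotation_dict = {"participants": [1, 0, 4]}, the nonzero labels form
+     # two non-adjacent runs, so two entities are produced: "Adults" decoded as
+     # Participant / Age (label 1) and "diabetes" as Participant / Condition
+     # (label 4).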
+     def _partition(alist, indices):
+         # split alist into consecutive chunks at the given indices
+         return [alist[i:j] for i, j in zip([0] + indices, indices + [None])]
+
+     ents = []
+     for annotation_type, annotations in annotation_dict.items():
+         # token positions that carry a (nonzero) label
+         indices = [idx for idx, val in enumerate(annotations) if val != 0]
+
+         if len(indices) > 0:  # if annotations exist for this document
+             split_indices = []
+             # split labeled positions into separate entities wherever the run
+             # of tokens breaks or the fine-grained label changes
+             for item_index, item in enumerate(indices):
+                 if item_index + 1 == len(indices):
+                     break
+                 if indices[item_index] + 1 != indices[item_index + 1]:
+                     split_indices.append(item_index + 1)
+                 elif annotations[item] != annotations[item + 1]:
+                     split_indices.append(item_index + 1)
+             multiple_indices = _partition(indices, split_indices)
+
+             for _indices in multiple_indices:
+                 high_level_type = LABEL_DECODERS["starting_spans"][annotation_type][1]
+                 fine_grained_type = LABEL_DECODERS["hierarchical_labels"][
+                     annotation_type
+                 ][annotations[_indices[0]]]
+                 annotation_text = " ".join([tokenized[ind] for ind in _indices])
+
+                 # note: find() returns the first occurrence of the span text,
+                 # so offsets can be off when the same string repeats
+                 char_start = document_content.find(annotation_text)
+                 char_end = char_start + len(annotation_text)
+
+                 ent = {
+                     "annotation_text": annotation_text,
+                     "high_level_annotation_type": high_level_type,
+                     "fine_grained_annotation_type": fine_grained_type,
+                     "char_start": char_start,
+                     "char_end": char_end,
+                 }
+
+                 ents.append(ent)
+     return ents
+
+
+ class EbmPico(datasets.GeneratorBasedBuilder):
+     """A Corpus with Multi-Level Annotations of Patients, Interventions and Outcomes to
+     Support Language Processing for Medical Literature."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="ebm_pico_source",
+             version=SOURCE_VERSION,
+             description="ebm_pico source schema",
+             schema="source",
+             subset_id="ebm_pico",
+         ),
+         BigBioConfig(
+             name="ebm_pico_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="ebm_pico BigBio schema",
+             schema="bigbio_kb",
+             subset_id="ebm_pico",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "ebm_pico_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "doc_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "entities": [
+                         {
+                             "text": datasets.Value("string"),
+                             "annotation_type": datasets.Value("string"),
+                             "fine_grained_annotation_type": datasets.Value("string"),
+                             "start": datasets.Value("int64"),
+                             "end": datasets.Value("int64"),
+                         }
+                     ],
+                 }
+             )
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+         else:
+             raise ValueError("config.schema must be either source or bigbio_kb")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(urls)
+
+         documents_folder = Path(data_dir) / "ebm_nlp_2_00" / "documents"
+         # training labels are AMT crowd annotations aggregated to reduce noise;
+         # the expert-annotated test labels live under "test/gold"
+         annotations_folder = (
+             Path(data_dir) / "ebm_nlp_2_00" / "annotations" / "aggregated"
+         )
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "documents_folder": documents_folder,
+                     "annotations_folder": annotations_folder,
+                     "split_folder": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "documents_folder": documents_folder,
+                     "annotations_folder": annotations_folder,
+                     "split_folder": "test/gold",
+                 },
+             ),
+         ]
+
+     def _generate_examples(
+         self, documents_folder, annotations_folder, split_folder: str
+     ) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         annotation_types = ["interventions", "outcomes", "participants"]
+
+         # use one annotation type's folder to enumerate the documents in this split
+         docs_path = os.path.join(
+             annotations_folder,
+             f"hierarchical_labels/{annotation_types[0]}/{split_folder}/",
+         )
+         documents_in_split = os.listdir(docs_path)
+
+         uid = 0
+         for id_, document in enumerate(documents_in_split):
+             document_id = document.split(".")[0]
+             with open(f"{documents_folder}/{document_id}.tokens") as fp:
+                 tokenized = fp.read().splitlines()
+             document_content = " ".join(tokenized)
+
+             # read the per-token label sequence for each annotation type;
+             # a missing file means no annotations of that type for this document
+             annotation_dict = {}
+             for annotation_type in annotation_types:
+                 try:
+                     with open(
+                         f"{annotations_folder}/hierarchical_labels/{annotation_type}/{split_folder}/{document}"
+                     ) as fp:
+                         annotation_dict[annotation_type] = [
+                             int(x) for x in fp.read().splitlines()
+                         ]
+                 except OSError:
+                     annotation_dict[annotation_type] = []
+
+             ents = _get_entities_pico(
+                 annotation_dict, tokenized=tokenized, document_content=document_content
+             )
+
+             if self.config.schema == "source":
+                 data = {
+                     "doc_id": document_id,
+                     "text": document_content,
+                     "entities": [
+                         {
+                             "text": ent["annotation_text"],
+                             "annotation_type": ent["high_level_annotation_type"],
+                             "fine_grained_annotation_type": ent[
+                                 "fine_grained_annotation_type"
+                             ],
+                             "start": ent["char_start"],
+                             "end": ent["char_end"],
+                         }
+                         for ent in ents
+                     ],
+                 }
+                 yield id_, data
+
+             elif self.config.schema == "bigbio_kb":
+                 data = {
+                     "id": str(uid),
+                     "document_id": document_id,
+                     "passages": [],
+                     "entities": [],
+                     "relations": [],
+                     "events": [],
+                     "coreferences": [],
+                 }
+                 uid += 1
+
+                 data["passages"] = [
+                     {
+                         "id": str(uid),
+                         "type": "document",
+                         "text": [document_content],
+                         "offsets": [[0, len(document_content)]],
+                     }
+                 ]
+                 uid += 1
+
+                 for ent in ents:
+                     entity = {
+                         "id": str(uid),  # ids in the kb schema are strings
+                         "type": f'{ent["high_level_annotation_type"]}_{ent["fine_grained_annotation_type"]}',
+                         "text": [ent["annotation_text"]],
+                         "offsets": [[ent["char_start"], ent["char_end"]]],
+                         "normalized": [],
+                     }
+                     data["entities"].append(entity)
+                     uid += 1
+
+                 yield uid, data
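
Once the companion bigbiohub.py module is present alongside this script, the loader can be used through the datasets library. A minimal sketch, assuming the script is hosted as bigbio/ebm_pico on the Hub (the repo id is an assumption here) and a datasets version that accepts trust_remote_code:

from datasets import load_dataset

# source schema: one record per abstract, entities carry character offsets
ds = load_dataset("bigbio/ebm_pico", name="ebm_pico_source", trust_remote_code=True)
sample = ds["train"][0]
print(sample["doc_id"])
for ent in sample["entities"][:3]:
    print(ent["annotation_type"], ent["fine_grained_annotation_type"], ent["text"])

# same documents in the harmonized BigBio KB schema
ds_kb = load_dataset("bigbio/ebm_pico", name="ebm_pico_bigbio_kb", trust_remote_code=True)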