gabrielaltay
committed
Commit 024bcc2
Parent(s): f500cf2
upload hubscripts/psytar_hub.py to hub from bigbio repo
psytar.py
ADDED
@@ -0,0 +1,506 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The "Psychiatric Treatment Adverse Reactions" (PsyTAR) dataset contains 891 drug
reviews posted by patients on "askapatient.com" about the effectiveness and adverse
drug events associated with Zoloft, Lexapro, Cymbalta, and Effexor XR.

For each drug review, patient demographics, duration of treatment, and satisfaction
with the drugs were reported.

This dataset can be used for:

1. (multi-label) sentence classification, across 6 labels:
       Adverse Drug Reaction (ADR)
       Withdrawal Symptoms (WDs)
       Sign/Symptoms/Illness (SSIs)
       Drug Indications (DIs)
       Drug Effectiveness (EF)
       Drug Ineffectiveness (INF)
   and Others (not applicable)

2. Recognition of 4 different types of entity:
       ADRs (4813 mentions)
       WDs (590 mentions)
       SSIs (1219 mentions)
       DIs (792 mentions)

In the source schema, systematic annotations with UMLS and SNOMED-CT concepts are provided.
"""

import re
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from .bigbiohub import kb_features
from .bigbiohub import text_features
from .bigbiohub import brat_parse_to_bigbio_kb
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks

_LANGUAGES = ["English"]
_PUBMED = False
_LOCAL = True
_CITATION = """\
@article{Zolnoori2019,
  author  = {Maryam Zolnoori and
             Kin Wah Fung and
             Timothy B. Patrick and
             Paul Fontelo and
             Hadi Kharrazi and
             Anthony Faiola and
             Yi Shuan Shirley Wu and
             Christina E. Eldredge and
             Jake Luo and
             Mike Conway and
             Jiaxi Zhu and
             Soo Kyung Park and
             Kelly Xu and
             Hamideh Moayyed and
             Somaieh Goudarzvand},
  title   = {A systematic approach for developing a corpus of patient \
reported adverse drug events: A case study for {SSRI} and {SNRI} medications},
  journal = {Journal of Biomedical Informatics},
  volume  = {90},
  year    = {2019},
  url     = {https://doi.org/10.1016/j.jbi.2018.12.005},
  doi     = {10.1016/j.jbi.2018.12.005},
}
"""

_DATASETNAME = "psytar"
_DISPLAYNAME = "PsyTAR"

_DESCRIPTION = """\
The "Psychiatric Treatment Adverse Reactions" (PsyTAR) dataset contains 891 drug
reviews posted by patients on "askapatient.com" about the effectiveness and adverse
drug events associated with Zoloft, Lexapro, Cymbalta, and Effexor XR.

This dataset can be used for (multi-label) sentence classification of Adverse Drug
Reaction (ADR), Withdrawal Symptoms (WDs), Sign/Symptoms/Illness (SSIs), Drug
Indications (DIs), Drug Effectiveness (EF), Drug Ineffectiveness (INF) and Others, as
well as for recognition of 4 different types of named entity (in the categories ADRs,
WDs, SSIs and DIs).
"""

_HOMEPAGE = "https://www.askapatient.com/research/pharmacovigilance/corpus-ades-psychiatric-medications.asp"

_LICENSE = "Creative Commons Attribution 4.0 International"

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.TEXT_CLASSIFICATION]

_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"


@dataclass
class PsyTARBigBioConfig(BigBioConfig):
    schema: str = "source"
    name: str = "psytar_source"
    version: datasets.Version = _SOURCE_VERSION
    description: str = "PsyTAR source schema"
    subset_id: str = "psytar"


class PsyTARDataset(datasets.GeneratorBasedBuilder):
    """The PsyTAR dataset contains patients' reviews on the effectiveness and adverse
    drug events associated with Zoloft, Lexapro, Cymbalta, and Effexor XR."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        PsyTARBigBioConfig(
            name="psytar_source",
            version=SOURCE_VERSION,
            description="PsyTAR source schema",
            schema="source",
            subset_id="psytar",
        ),
        PsyTARBigBioConfig(
            name="psytar_bigbio_kb",
            version=BIGBIO_VERSION,
            description="PsyTAR BigBio KB schema",
            schema="bigbio_kb",
            subset_id="psytar",
        ),
        PsyTARBigBioConfig(
            name="psytar_bigbio_text",
            version=BIGBIO_VERSION,
            description="PsyTAR BigBio text classification schema",
            schema="bigbio_text",
            subset_id="psytar",
        ),
    ]

    BUILDER_CONFIG_CLASS = PsyTARBigBioConfig

    DEFAULT_CONFIG_NAME = "psytar_source"
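
    # Note (added comment): the three configs above are selected through the
    # `name` argument of datasets.load_dataset, e.g. name="psytar_bigbio_kb";
    # when no name is given, DEFAULT_CONFIG_NAME ("psytar_source") is used.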

    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "doc_id": datasets.Value("string"),
                    "disorder": datasets.Value("string"),
                    "side_effect": datasets.Value("string"),
                    "comment": datasets.Value("string"),
                    "gender": datasets.Value("string"),
                    "age": datasets.Value("int32"),
                    "dosage_duration": datasets.Value("string"),
                    "date": datasets.Value("string"),
                    "category": datasets.Value("string"),
                    "sentences": [
                        {
                            "text": datasets.Value("string"),
                            "label": datasets.Sequence([datasets.Value("string")]),
                            "findings": datasets.Value("string"),
                            "others": datasets.Value("string"),
                            "rating": datasets.Value("string"),
                            "category": datasets.Value("string"),
                            "entities": [
                                {
                                    "text": datasets.Value("string"),
                                    "type": datasets.Value("string"),
                                    "mild": datasets.Value("string"),
                                    "moderate": datasets.Value("string"),
                                    "severe": datasets.Value("string"),
                                    "persistent": datasets.Value("string"),
                                    "non_persistent": datasets.Value("string"),
                                    "body_site": datasets.Value("string"),
                                    "rating": datasets.Value("string"),
                                    "drug": datasets.Value("string"),
                                    "class": datasets.Value("string"),
                                    "entity_type": datasets.Value("string"),
                                    "UMLS": datasets.Sequence(
                                        [datasets.Value("string")]
                                    ),
                                    "SNOMED": datasets.Sequence(
                                        [datasets.Value("string")]
                                    ),
                                }
                            ],
                        }
                    ],
                }
            )
        elif self.config.schema == "bigbio_kb":
            features = kb_features
        elif self.config.schema == "bigbio_text":
            features = text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        if self.config.data_dir is None:
            raise ValueError(
                "This is a local dataset. Please pass the data_dir kwarg to load_dataset."
            )
        else:
            data_dir = self.config.data_dir

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": Path(data_dir),
                },
            ),
        ]
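
    # Note (added comment): data_dir is forwarded unchanged through gen_kwargs
    # and ultimately to pd.read_excel, so it should point at the PsyTAR XLSX
    # file itself rather than at a directory containing it.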

    def _extract_labels(self, row):
        label = [
            "ADR" * row.ADR,
            "WD" * row.WD,
            "EF" * row.EF,
            "INF" * row.INF,
            "SSI" * row.SSI,
            "DI" * row.DI,
            "Others" * row.others,
        ]
        label = [_l for _l in label if _l != ""]
        return label
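
    # Note (added comment): the string multiplication above exploits the 0/1
    # indicator columns: "ADR" * 1 == "ADR" while "ADR" * 0 == "", so the
    # filter on empty strings leaves only the labels whose indicator is set.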

    def _columns_to_list(self, row, sheet="ADR"):
        annotations = []
        for i in range(30 if sheet == "ADR" else 10):
            annotations.append(row[f"{sheet}{i + 1}"])
        annotations = [a for a in annotations if not pd.isna(a)]
        return annotations

    def _columns_to_bigbio_kb(self, row, sheet="ADR"):
        annotations = []
        for i in range(30 if sheet == "ADR" else 10):
            annotation = row[f"{sheet}{i + 1}"]
            if not pd.isna(annotation):
                start_index = row.sentences.lower().find(annotation.lower())
                if start_index != -1:
                    end_index = start_index + len(annotation)
                    entity = {
                        "id": f"T{i+1}",
                        "offsets": [[start_index, end_index]],
                        "text": [annotation],
                        "type": sheet,
                    }

                    annotations.append(entity)
        return annotations
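
    # Note (added comment): offsets are recovered with a case-insensitive
    # substring search over the sentence, so annotations whose text cannot be
    # located verbatim in the sentence are silently skipped.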

    def _standards_columns_to_list(self, row, standard="UMLS"):
        standards = {"UMLS": ["UMLS1", "UMLS2"], "SNOMED": ["SNOMED-CT", "SNOMED-CT.1"]}
        _out_list = []
        for s in standards[standard]:
            _out_list.append(row[s])
        _out_list = [a for a in _out_list if not pd.isna(a)]
        return _out_list

    def _read_sentence_xlsx(self, filepath: Path) -> pd.DataFrame:
        sentence_df = pd.read_excel(
            filepath,
            sheet_name="Sentence_Labeling",
            dtype={"drug_id": str, "sentences": str},
        )

        sentence_df = sentence_df.dropna(subset=["sentences"])
        sentence_df = sentence_df.loc[
            sentence_df.sentences.apply(lambda x: len(x.strip())) > 0
        ]
        sentence_df = sentence_df.fillna(0)

        sentence_df[["ADR", "WD", "EF", "INF", "SSI", "DI"]] = (
            sentence_df[["ADR", "WD", "EF", "INF", "SSI", "DI"]]
            .replace(re.compile("[!* ]+"), 1)
            .astype(int)
        )

        sentence_df["sentence_index"] = sentence_df["sentence_index"].astype("int32")
        sentence_df["drug_id"] = sentence_df["drug_id"].astype("str")

        return sentence_df
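
    # Note (added comment): in the Sentence_Labeling sheet the label columns
    # flag positives with markers such as "!" or "*"; the regex replace above
    # maps any run of those characters to 1, while the earlier fillna(0) turns
    # empty cells into the negative class before the cast to int.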

    def _read_samples_xlsx(self, filepath: Path) -> pd.DataFrame:
        samples_df = pd.read_excel(
            filepath, sheet_name="Sample", dtype={"drug_id": str}
        )
        samples_df["age"] = samples_df["age"].fillna(0).astype(int)
        samples_df["drug_id"] = samples_df["drug_id"].astype("str")

        return samples_df

    def _read_identified_xlsx_to_bigbio_kb(self, filepath: Path) -> Dict:
        sheet_names = ["ADR", "WD", "SSI", "DI"]
        identified_entities = {}

        for sheet in sheet_names:
            identified_entities[sheet] = pd.read_excel(
                filepath, sheet_name=sheet + "_Identified"
            )
            identified_entities[sheet]["bigbio_kb"] = identified_entities[sheet].apply(
                lambda x: self._columns_to_bigbio_kb(x, sheet), axis=1
            )

        return identified_entities

    TYPE_TO_COLNAME = {"ADR": "ADRs", "DI": "DIs", "SSI": "SSI", "WD": "WDs"}

    def _identified_mapped_xlsx_to_df(self, filepath: Path) -> pd.DataFrame:
        sheet_names_mapped = [
            ["ADR_Mapped", "ADR"],
            ["WD-Mapped ", "WD"],
            ["SSI_Mapped", "SSI"],
            ["DI_Mapped", "DI"],
        ]

        _mappings = []

        # Read the specific XLSX sheet with _Mapped annotations
        for sheet, sheet_short in sheet_names_mapped:
            _df_mapping = pd.read_excel(filepath, sheet_name=sheet)

            # Correcting column names
            if sheet_short in ["WD"]:
                _df_mapping = _df_mapping.rename(
                    columns={"sentence_id": "sentence_index"}
                )

            # Changing column names to allow concatenation
            _df_mapping = _df_mapping.rename(
                columns={self.TYPE_TO_COLNAME[sheet_short]: "entity"}
            )

            # Putting UMLS and SNOMED annotations in a single column
            _df_mapping["UMLS"] = _df_mapping.apply(
                lambda x: self._standards_columns_to_list(x), axis=1
            )
            _df_mapping["SNOMED"] = _df_mapping.apply(
                lambda x: self._standards_columns_to_list(x, standard="SNOMED"), axis=1
            )

            _mappings.append(_df_mapping)

        df_mappings = pd.concat(_mappings).fillna(0)
        df_mappings["sentence_index"] = df_mappings["sentence_index"].astype("int32")
        df_mappings["drug_id"] = df_mappings["drug_id"].astype("str")

        return df_mappings

    def _convert_xlsx_to_source(self, filepath: Path) -> Dict:
        # Read XLSX files
        df_sentences = self._read_sentence_xlsx(filepath)
        df_sentences["label"] = df_sentences.apply(
            lambda x: self._extract_labels(x), axis=1
        )
        df_mappings = self._identified_mapped_xlsx_to_df(filepath)
        df_samples = self._read_samples_xlsx(filepath)

        # Configure indices
        df_samples = df_samples.set_index("drug_id").sort_index()
        df_sentences = df_sentences.set_index(
            ["drug_id", "sentence_index"]
        ).sort_index()
        df_mappings = df_mappings.set_index(["drug_id", "sentence_index"]).sort_index()

        # Iterate over samples
        for sample_row_id, sample in df_samples.iterrows():
            sentences = []
            try:
                df_sentence_selection = df_sentences.loc[sample_row_id]

                # Iterate over sentences
                for sentence_row_id, sentence in df_sentence_selection.iterrows():
                    entities = []
                    try:
                        df_mapped_selection = df_mappings.loc[
                            sample_row_id, sentence_row_id
                        ]

                        # Iterate over entities per sentence
                        for mapped_row_id, row in df_mapped_selection.iterrows():
                            entities.append(
                                {
                                    "text": row["entity"],
                                    "UMLS": row.UMLS,
                                    "SNOMED": row.SNOMED,
                                    "entity_type": row.entity_type,
                                    "type": row.type,
                                    "class": row["class"],
                                    "drug": row.drug,
                                    "rating": row.rating,
                                    "body_site": row["body-site"],
                                    "non_persistent": row["not-persistent"],
                                    "persistent": row["persistent"],
                                    "severe": row.severe,
                                    "moderate": row.moderate,
                                    "mild": row.mild,
                                }
                            )
                    except KeyError:
                        pass

                    sentences.append(
                        {
                            "text": sentence.sentences,
                            "entities": entities,
                            "label": sentence.label,
                            "findings": sentence.Findings,
                            "others": sentence.others,
                            "rating": sentence.rating,
                            "category": sentence.category,
                        }
                    )
            except KeyError:
                pass

            example = {
                "id": sample_row_id,
                "doc_id": sample_row_id,
                "disorder": sample.disorder,
                "side_effect": sample["side-effect"],
                "comment": sample.comment,
                "gender": sample.gender,
                "age": sample.age,
                "dosage_duration": sample.dosage_duration,
                "date": str(sample.date),
                "category": sample.category,
                "sentences": sentences,
            }
            yield example

    def _convert_xlsx_to_bigbio_kb(self, filepath: Path) -> Dict:
        bigbio_kb = self._read_identified_xlsx_to_bigbio_kb(filepath)

        i_doc = 0
        for _, df in bigbio_kb.items():
            for _, row in df.iterrows():
                text = row.sentences
                entities = row["bigbio_kb"]
                doc_id = f"{row['drug_id']}_{row['sentence_index']}_{i_doc}"

                if len(entities) != 0:
                    example = brat_parse_to_bigbio_kb(
                        {
                            "document_id": doc_id,
                            "text": text,
                            "text_bound_annotations": entities,
                            "normalizations": [],
                            "events": [],
                            "relations": [],
                            "equivalences": [],
                            "attributes": [],
                        },
                    )
                    example["id"] = i_doc
                    i_doc += 1
                    yield example
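
    # Note (added comment): brat_parse_to_bigbio_kb consumes a brat-style
    # document dict; the entity dicts built in _columns_to_bigbio_kb mimic brat
    # text-bound annotations (id, offsets, text, type), and the remaining brat
    # fields are passed as empty lists.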

    def _convert_xlsx_to_bigbio_text(self, filepath: Path) -> Dict:
        df = self._read_sentence_xlsx(filepath)
        df["label"] = df.apply(lambda x: self._extract_labels(x), axis=1)

        for idx, row in df.iterrows():
            example = {
                "id": idx,
                "document_id": f"{row['drug_id']}_{row['sentence_index']}",
                "text": row["sentences"],
                "labels": row["label"],
            }
            yield example
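
    # Note (added comment): each bigbio_text example is a single sentence paired
    # with the (possibly empty) multi-label list produced by _extract_labels.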

    def _generate_examples(self, filepath) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""

        if self.config.schema == "source":
            examples = self._convert_xlsx_to_source(filepath)

        elif self.config.schema == "bigbio_kb":
            examples = self._convert_xlsx_to_bigbio_kb(filepath)

        elif self.config.schema == "bigbio_text":
            examples = self._convert_xlsx_to_bigbio_text(filepath)

        for idx, example in enumerate(examples):
            yield idx, example
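

# Usage sketch (added, not part of the upstream script): loading the local
# dataset with the bigbio_text config. The XLSX path below is a placeholder and
# must point at a locally downloaded copy of the PsyTAR spreadsheet.
#
#     from datasets import load_dataset
#
#     dset = load_dataset(
#         "psytar.py",
#         name="psytar_bigbio_text",
#         data_dir="PsyTAR_dataset.xlsx",  # placeholder path to the XLSX file
#     )
#     print(dset["train"][0])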