gabrielaltay committed
Commit 4ca9b09
1 Parent(s): ddcebb9

upload hubscripts/essai_hub.py to hub from bigbio repo

Files changed (1)
  1. essai.py +221 -0
essai.py ADDED
@@ -0,0 +1,221 @@
import os

import datasets
import numpy as np

from .bigbiohub import text_features
from .bigbiohub import kb_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks
_LANGUAGES = ['French']
_PUBMED = False
_LOCAL = True
_CITATION = """\
@misc{dalloux, title={Datasets – Clément Dalloux}, url={http://clementdalloux.fr/?page_id=28}, journal={Clément Dalloux}, author={Dalloux, Clément}}
"""
_DATASETNAME = "essai"
_DISPLAYNAME = "ESSAI"

_DESCRIPTION = """\
We manually annotated two corpora from the biomedical field. The ESSAI corpus \
contains clinical trial protocols in French. They were mainly obtained from the \
National Cancer Institute. The typical protocol consists of two parts: the \
summary of the trial, which indicates the purpose of the trial and the methods \
applied; and a detailed description of the trial with the inclusion and \
exclusion criteria. The CAS corpus contains clinical cases published in \
scientific literature and training material. They are published in different \
journals from French-speaking countries (France, Belgium, Switzerland, Canada, \
African countries, tropical countries) and are related to various medical \
specialties (cardiology, urology, oncology, obstetrics, pulmonology, \
gastro-enterology). The purpose of clinical cases is to describe clinical \
situations of patients. Hence, their content is close to the content of clinical \
narratives (description of diagnoses, treatments or procedures, evolution, \
family history, expected audience, etc.). In clinical cases, negation is \
frequently used to describe the patient's signs, symptoms, and diagnosis. \
Speculation is present as well, but less frequently.

This version contains only the annotated ESSAI corpus.
"""
_HOMEPAGE = "https://clementdalloux.fr/?page_id=28"

_LICENSE = 'Data User Agreement'

_URLS = {
    "essai_source": "",
    "essai_bigbio_text": "",
    "essai_bigbio_kb": "",
}

_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"

_SUPPORTED_TASKS = [Tasks.TEXT_CLASSIFICATION]
class ESSAI(datasets.GeneratorBasedBuilder):
    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    DEFAULT_CONFIG_NAME = "essai_source"
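    # Three views of the corpus: the token-level source schema, a document-level
    # bigbio_text view for negation/speculation classification, and a bigbio_kb
    # view that exposes POS tags as entities.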
    BUILDER_CONFIGS = [
        BigBioConfig(
            name="essai_source",
            version=SOURCE_VERSION,
            description="ESSAI source schema",
            schema="source",
            subset_id="essai",
        ),
        BigBioConfig(
            name="essai_bigbio_text",
            version=BIGBIO_VERSION,
            description="ESSAI simplified BigBio schema for negation/speculation classification",
            schema="bigbio_text",
            subset_id="essai",
        ),
        BigBioConfig(
            name="essai_bigbio_kb",
            version=BIGBIO_VERSION,
            description="ESSAI simplified BigBio schema for part-of-speech tagging",
            schema="bigbio_kb",
            subset_id="essai",
        ),
    ]
    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "text": [datasets.Value("string")],
                    "lemmas": [datasets.Value("string")],
                    "POS_tags": [datasets.Value("string")],
                    "labels": [datasets.Value("string")],
                }
            )
        elif self.config.schema == "bigbio_text":
            features = text_features
        elif self.config.schema == "bigbio_kb":
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        if self.config.data_dir is None:
            raise ValueError(
                "This is a local dataset. Please pass the data_dir kwarg to load_dataset."
            )
        else:
            data_dir = self.config.data_dir

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"datadir": data_dir},
            ),
        ]
    def _generate_examples(self, datadir):
        key = 0
        for file in ["ESSAI_neg.txt", "ESSAI_spec.txt"]:
            filepath = os.path.join(datadir, file)
            label = "negation" if "neg" in file else "speculation"
            id_docs = []
            id_words = []
            words = []
            lemmas = []
            POS_tags = []
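            # Assumed input format (inferred from the parsing below, not from
            # official documentation): CoNLL-style tab-separated rows with at
            # least five columns: document id, token id, token, lemma, POS tag.
            # Rows with fewer than two columns (e.g. blank lines) are skipped.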
            # Explicit encoding: the French corpus files are assumed to be UTF-8.
            with open(filepath, encoding="utf-8") as f:
                for line in f:
                    line_content = line.split("\t")
                    if len(line_content) > 1:
                        id_docs.append(line_content[0])
                        id_words.append(line_content[1])
                        words.append(line_content[2])
                        lemmas.append(line_content[3])
                        POS_tags.append(line_content[4])

            dic = {
                "id_docs": np.array(list(map(int, id_docs))),
                "id_words": id_words,
                "words": words,
                "lemmas": lemmas,
                "POS_tags": POS_tags,
            }
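            # Regroup the token stream into documents: np.argwhere returns the
            # positions of all tokens carrying the given document id.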
            if self.config.schema == "source":
                for doc_id in set(dic["id_docs"]):
                    idces = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
                    text = [dic["words"][id] for id in idces]
                    text_lemmas = [dic["lemmas"][id] for id in idces]
                    POS_tags_ = [dic["POS_tags"][id] for id in idces]
                    yield key, {
                        "id": str(key),
                        "document_id": str(doc_id),
                        "text": text,
                        "lemmas": text_lemmas,
                        "POS_tags": POS_tags_,
                        "labels": [label],
                    }
                    key += 1
            elif self.config.schema == "bigbio_text":
                for doc_id in set(dic["id_docs"]):
                    idces = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
                    text = " ".join([dic["words"][id] for id in idces])
                    yield key, {
                        "id": str(key),
                        "document_id": str(doc_id),
                        "text": text,
                        "labels": [label],
                    }
                    key += 1
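            # In the bigbio_kb view below, each token becomes its own
            # "sentence" passage and its POS tag an entity; note that the
            # offsets are token indices, not character offsets.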
            elif self.config.schema == "bigbio_kb":
                for doc_id in set(dic["id_docs"]):
                    idces = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
                    text = [dic["words"][id] for id in idces]
                    POS_tags_ = [dic["POS_tags"][id] for id in idces]

                    data = {
                        "id": str(key),
                        "document_id": str(doc_id),
                        "passages": [],
                        "entities": [],
                        "relations": [],
                        "events": [],
                        "coreferences": [],
                    }
                    key += 1

                    data["passages"] = [
                        {
                            "id": str(key + i),
                            "type": "sentence",
                            "text": [text[i]],
                            "offsets": [[i, i + 1]],
                        }
                        for i in range(len(text))
                    ]
                    key += len(text)

                    for i in range(len(text)):
                        entity = {
                            "id": str(key),
                            "type": "POS_tag",
                            "text": [POS_tags_[i]],
                            "offsets": [[i, i + 1]],
                            "normalized": [],
                        }
                        data["entities"].append(entity)
                        key += 1

                    yield key, data
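For reference, a minimal sketch of loading this dataset once the script is on the Hub. The repo id "bigbio/essai" and the directory path are illustrative assumptions; because the dataset is local (_LOCAL = True), the ESSAI text files must be obtained separately and passed via data_dir:

import datasets

# The directory must contain ESSAI_neg.txt and ESSAI_spec.txt (path is hypothetical).
ds = datasets.load_dataset(
    "bigbio/essai",
    name="essai_bigbio_text",
    data_dir="/path/to/ESSAI",
)
print(ds["train"][0])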