Datasets: seth_corpus
Modalities: Text
Languages: English
Libraries: Datasets
License: Apache License 2.0
gabrielaltay committed
Commit 42e6fa5
1 Parent(s): f139c6c

upload hubscripts/seth_corpus_hub.py to hub from bigbio repo

Files changed (1)
  1. seth_corpus.py +233 -0
seth_corpus.py ADDED
@@ -0,0 +1,233 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Descriptions of genetic variations and their effect are widely spread across the biomedical literature. However,
+ finding all mentions of a specific variation, or all mentions of variations in a specific gene, is difficult to
+ achieve due to the many ways such variations are described. Here, we describe SETH, a tool for the recognition of
+ variations from text and their subsequent normalization to dbSNP or UniProt. SETH achieves high precision and recall
+ on several evaluation corpora of PubMed abstracts. It is freely available and encompasses stand-alone scripts for
+ isolated application and evaluation as well as thorough documentation for integration into other applications.
+ This script loads the dataset in the BigBio schema (knowledge base schema: schemas/kb) and/or the source (default) schema."""
+
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+ from .bigbiohub import parsing  # assumed location of the brat parsing helpers called in _generate_examples
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @Article{SETH2016,
+ Title = {SETH detects and normalizes genetic variants in text.},
+ Author = {Thomas, Philippe and Rockt{\"{a}}schel, Tim and Hakenberg, J{\"{o}}rg and Lichtblau, Yvonne and Leser, Ulf},
+ Journal = {Bioinformatics},
+ Year = {2016},
+ Month = {Jun},
+ Doi = {10.1093/bioinformatics/btw234},
+ Language = {eng},
+ Medline-pst = {aheadofprint},
+ Pmid = {27256315},
+ Url = {http://dx.doi.org/10.1093/bioinformatics/btw234}
+ }
+ """
+
+ _DATASETNAME = "seth_corpus"
+ _DISPLAYNAME = "SETH Corpus"
+
+ _DESCRIPTION = (
+     """SNP named entity recognition corpus consisting of 630 PubMed citations."""
+ )
+
+ _HOMEPAGE = "https://github.com/rockt/SETH"
+
+ _LICENSE = 'Apache License 2.0'
+ _URLS = {
+     "source": "https://github.com/rockt/SETH/archive/refs/heads/master.zip",
+     "bigbio_kb": "https://github.com/rockt/SETH/archive/refs/heads/master.zip",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]
+
+ _SOURCE_VERSION = "1.0.0"
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class SethCorpusDataset(datasets.GeneratorBasedBuilder):
+     """SNP named entity recognition corpus consisting of 630 PubMed citations."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="seth_corpus_source",
+             version=SOURCE_VERSION,
+             description="SETH corpus source schema",
+             schema="source",
+             subset_id="seth_corpus",
+         ),
+         BigBioConfig(
+             name="seth_corpus_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="SETH corpus BigBio schema",
+             schema="bigbio_kb",
+             subset_id="seth_corpus",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "seth_corpus_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "document_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
+                         {
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             "text": datasets.Sequence(datasets.Value("string")),
+                             "type": datasets.Value("string"),
+                             "id": datasets.Value("string"),
+                         }
+                     ],
+                     "events": [  # E line in brat
+                         {
+                             "trigger": datasets.Value("string"),
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "arguments": datasets.Sequence(
+                                 {
+                                     "role": datasets.Value("string"),
+                                     "ref_id": datasets.Value("string"),
+                                 }
+                             ),
+                         }
+                     ],
+                     "relations": [  # R line in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "head": {
+                                 "ref_id": datasets.Value("string"),
+                                 "role": datasets.Value("string"),
+                             },
+                             "tail": {
+                                 "ref_id": datasets.Value("string"),
+                                 "role": datasets.Value("string"),
+                             },
+                             "type": datasets.Value("string"),
+                         }
+                     ],
+                     "equivalences": [  # Equiv line in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "ref_ids": datasets.Sequence(datasets.Value("string")),
+                         }
+                     ],
+                     "attributes": [  # M or A lines in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "ref_id": datasets.Value("string"),
+                             "value": datasets.Value("string"),
+                         }
+                     ],
+                     "normalizations": [  # N lines in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "ref_id": datasets.Value("string"),
+                             "resource_name": datasets.Value("string"),
+                             "cuid": datasets.Value("string"),
+                             "text": datasets.Value("string"),
+                         }
+                     ],
+                 },
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         urls = _URLS[self.config.schema]
+         data_dir = Path(dl_manager.download_and_extract(urls))
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_dir / "SETH-master" / "resources" / "SETH-corpus",
+                     "corpus_file": "corpus.txt",
+                     "split": "train",
+                 },
+             ),
+         ]
+
+     def _generate_examples(
+         self, filepath: Path, corpus_file: str, split: str
+     ) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         if self.config.schema == "source":
+             with open(filepath / corpus_file, encoding="utf-8") as f:
+                 contents = f.readlines()
+             for guid, content in enumerate(contents):
+                 file_name, text = content.split("\t")
+                 example = parsing.parse_brat_file(
+                     filepath / "annotations" / f"{file_name}.ann"
+                 )
+                 example["id"] = str(guid)
+                 example["text"] = text
+                 yield guid, example
+
+         elif self.config.schema == "bigbio_kb":
+             with open(filepath / corpus_file, encoding="utf-8") as f:
+                 contents = f.readlines()
+             for guid, content in enumerate(contents):
+                 file_name, text = content.split("\t")
+                 example = parsing.parse_brat_file(
+                     filepath / "annotations" / f"{file_name}.ann"
+                 )
+
+                 # this example contains event lines,
+                 # but the events have no arguments;
+                 # this is most likely an error on the annotation side
+                 if example["document_id"] == "11058905":
+                     example["events"] = []
+
+                 example["text"] = text
+                 example = parsing.brat_parse_to_bigbio_kb(example)
+                 example["id"] = str(guid)
+                 yield guid, example
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
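
A minimal usage sketch, assuming this script and its bigbiohub helper are hosted together in a Hub dataset repository: the repository id "bigbio/seth_corpus" is an assumption, the config names come from BUILDER_CONFIGS above, and newer versions of the datasets library may additionally require trust_remote_code=True when loading script-based datasets.

    from datasets import load_dataset

    # Source (default) schema; the repo id is assumed for illustration.
    source_ds = load_dataset("bigbio/seth_corpus", name="seth_corpus_source")

    # BigBio knowledgebase schema.
    kb_ds = load_dataset("bigbio/seth_corpus", name="seth_corpus_bigbio_kb")

    # Both configs expose a single "train" split (see _split_generators).
    print(source_ds["train"][0]["document_id"])
    print(kb_ds["train"][0].keys())

Both configs download the same SETH GitHub archive (_URLS), so the choice of config only changes the feature schema, not the underlying data.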