TomTBT committed on
Commit: d78f189
1 Parent(s): d5fb624

Upload pmc_open_access_section.py

Files changed (1)
  1. pmc_open_access_section.py +391 -0
pmc_open_access_section.py ADDED
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This dataset script is based on the pmc/open_access.py loading script.

"""PMC Open Access Subset sections parsed (plain text)"""

import datetime
import pandas as pd
import numpy as np
from itertools import compress, chain
from collections import defaultdict
import re
from lxml import etree
import json
import html
import unicodedata

import datasets
from datasets.tasks import LanguageModeling


# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = ""

_DESCRIPTION = """\
The PMC Open Access Subset includes more than 3.4 million journal articles and preprints that are made available under
license terms that allow reuse.
Not all articles in PMC are available for text mining and other reuse; many have copyright protection. However, articles
in the PMC Open Access Subset are made available under Creative Commons or similar licenses that generally allow more
liberal redistribution and reuse than a traditional copyrighted work.
The PMC Open Access Subset is one part of the PMC Article Datasets.

This version uses the XML files as source, taking advantage of the structured text
to split each article into sections: introduction, methods, results,
discussion, conclusion, front, body and back. The XML markup is then removed
and the content is kept as plain text.
"""
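
# A minimal usage sketch (assumption: this script is saved locally as
# "pmc_open_access_section.py"; valid config names are "all", "commercial",
# "non_commercial" and "other"):
#
#     from datasets import load_dataset
#     ds = load_dataset("pmc_open_access_section.py", "commercial", split="train")
#     print(ds[0]["introduction"][:200])
#
# Passing streaming=True to load_dataset can avoid materializing the very large archives up front.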

_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/pmc/tools/openftlist/"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = """
https://www.ncbi.nlm.nih.gov/pmc/about/copyright/

Within the PMC Open Access Subset, there are three groupings:

Commercial Use Allowed - CC0, CC BY, CC BY-SA, CC BY-ND licenses
Non-Commercial Use Only - CC BY-NC, CC BY-NC-SA, CC BY-NC-ND licenses; and
Other - no machine-readable Creative Commons license, no license, or a custom license.
"""

_URL_ROOT = "https://ftp.ncbi.nlm.nih.gov/pub/pmc/"
_URL = _URL_ROOT + "oa_bulk/{subset}/xml/"

_SUBSETS = {
    "commercial": "oa_comm",
    "non_commercial": "oa_noncomm",
    "other": "oa_other",
}
_BASELINE_DATE = "2022-11-18"

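# For illustration, the download URLs assembled in `_split_generators` below expand to, e.g.:
#   https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/oa_comm/xml/oa_comm_xml.PMC001xxxxxx.baseline.2022-11-18.filelist.csv
#   https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/oa_comm/xml/oa_comm_xml.PMC001xxxxxx.baseline.2022-11-18.tar.gz
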
begin_doc_rgx = re.compile("""<!DOCTYPE.*""")
def clean_raw(xml_text):
    """
    Fixes the formatting of the XML files and returns the cleaned text.
    Some files have bad formatting, but they can be fixed/improved.
    """
    # Some XML files can't be parsed because they do not start with the DOCTYPE declaration
    # Could be disabled if we handle the parsing error (TBD, how many files would be trashed)

    begin_doc = begin_doc_rgx.search(xml_text)
    if begin_doc is not None:
        xml_text = xml_text[begin_doc.start():]

    # Some XML files are poisoned with consecutive tabs and new lines
    xml_text = re.sub(r"\s+", " ", xml_text)
    return xml_text

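# Rough illustration of clean_raw: an input such as
#   '<?xml version="1.0"?>\n<!DOCTYPE article ...>\n<article>\n\t<p>Text</p>\n</article>'
# is trimmed to start at "<!DOCTYPE" and its whitespace runs are collapsed, giving
#   '<!DOCTYPE article ...> <article> <p>Text</p> </article>'
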
def construct_datadict(article_tree):
    """
    Where the magic happens. A long function that:
    - Removes the references (and the elements they point to) from the text
    - Extracts paragraphs and titles along with their path in the document
    - Uses the titles to identify the introduction, methods, results, discussion and conclusion sections
    - Uses the paths to group paragraphs and titles into the corresponding section content
    - Puts the remaining p and title elements into three other sections: front, body, back

    Returns:
    - res_content_d: Dictionary with the resulting content

    Useful information about the tags can be found here: https://jats.nlm.nih.gov/archiving/tag-library/1.3/
    """
    res_content_d = {}

    refs_el = article_tree.find(".//ref-list")
    if refs_el is not None:
        refs_el.getparent().remove(refs_el)

    # One big query is faster than multiple small ones
    ref_el_l = article_tree.xpath(
        ".//fig|.//table-wrap|.//array|.//supplementary-material"
        "|.//inline-supplementary-material|.//disp-formula"
        "|.//inline-formula|.//graphic|.//inline-graphic"
        "|.//media|.//inline-media|.//boxed-text"
        "|.//table-wrap-foot|.//fn-group|.//chem-struct-wrap"
        "|.//code|.//disp-quote|.//speech"
    )
    for el in ref_el_l[::-1]:
        repl_xref = etree.Element("xref")
        repl_xref.tail = el.tail
        el.addprevious(repl_xref)
        el.getparent().remove(el)

    path_l, text_l = [], []
    t_paths, t_texts_lowcase = [], []
    for part in ["front", "body", "back"]:  # Iterate over the parts; front and back are stored directly
        tmp_path_l, tmp_text_l = [], []
        tmp_t_paths, tmp_t_texts_lowcase = [], []
        part_el = article_tree.find(".//" + part)
        if part_el is None:
            res_content_d[part] = []
            continue
        # Only the outermost p elements are kept, to prevent duplication.
        # Titles with a p inside have also been seen; not(ancestor::title) prevents duplication of that p
        for el in part_el.xpath(".//p[not(ancestor::p) and not(ancestor::title)] | .//title[not(ancestor::p) and not(ancestor::title)]"):
            new_text = " ".join(el.itertext())
            new_text = unicodedata.normalize("NFKD", html.unescape(new_text))
            tmp_path_l.append(article_tree.getelementpath(el))
            tmp_text_l.append(new_text)
            if el.tag == "title":
                tmp_t_paths.append(tmp_path_l[-1])
                tmp_t_texts_lowcase.append(new_text.lower())
        if part == "body":  # We keep the body for processing right below.
            path_l, text_l = tmp_path_l, tmp_text_l
            t_paths, t_texts_lowcase = tmp_t_paths, tmp_t_texts_lowcase
        else:
            res_content_d[part] = tmp_text_l

    # Determine from the titles which paragraphs belong to which section
    mask_intro = np.array(["introduction" in t_text or "background" in t_text for t_text in t_texts_lowcase]).astype(bool)
    mask_metho = np.array(["method" in t_text for t_text in t_texts_lowcase]).astype(bool)
    mask_resul = np.array(["result" in t_text for t_text in t_texts_lowcase]).astype(bool)
    mask_discu = np.array(["discussion" in t_text for t_text in t_texts_lowcase]).astype(bool)
    mask_concl = np.array(["conclusion" in t_text for t_text in t_texts_lowcase]).astype(bool)
    processed_mask = np.zeros(len(text_l), dtype="bool")
    for mask, name_section in zip([mask_intro, mask_metho, mask_resul, mask_discu, mask_concl],
                                  ["introduction", "methods", "results", "discussion", "conclusion"]):
        if not np.any(mask):
            res_content_d[name_section] = []
            continue

        filtered_path_l = list(compress(t_paths, mask))
        levels = np.array([len(path.split("/")) for path in filtered_path_l])
        root_path = filtered_path_l[np.argmin(levels)]
        root_path = root_path[:root_path.rindex("/")]
        mask_contents = np.array([path.startswith(root_path) for path in path_l]).astype(bool)
        processed_mask |= mask_contents
        res_content_d[name_section] = list(compress(text_l, mask_contents))

    processed_mask = ~processed_mask  # Finally, the body part is everything that doesn't belong to the previous categories
    res_content_d["body"] = list(compress(text_l, processed_mask))

    return res_content_d

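# Sketch of the dictionary construct_datadict returns (each value is a list of
# plain-text paragraphs/titles; a list is empty when the section is absent or not identified):
#   {"front": [...], "body": [...], "back": [...],
#    "introduction": [...], "methods": [...], "results": [...],
#    "discussion": [...], "conclusion": [...]}
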
class OpenAccessXMLConfig(datasets.BuilderConfig):
    """BuilderConfig for the PMC Open Access Subset."""

    def __init__(self, subsets=None, **kwargs):
        """BuilderConfig for the PMC Open Access Subset.
        Args:
            subsets (:obj:`List[str]`): List of subsets/groups to load.
            **kwargs: Keyword arguments forwarded to super.
        """
        subsets = [subsets] if isinstance(subsets, str) else subsets
        super().__init__(
            name="+".join(subsets), **kwargs,
        )
        self.subsets = subsets if self.name != "all" else list(_SUBSETS.keys())


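# For illustration: OpenAccessXMLConfig(subsets="commercial") produces the config named
# "commercial", OpenAccessXMLConfig(subsets=["commercial", "other"]) the config named
# "commercial+other", and the "all" config resolves to every key of _SUBSETS.
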
class OpenAccessXML(datasets.GeneratorBasedBuilder):
    """PMC Open Access Subset enriched from XML files."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = OpenAccessXMLConfig
    BUILDER_CONFIGS = [OpenAccessXMLConfig(subsets="all")] + [OpenAccessXMLConfig(subsets=subset) for subset in _SUBSETS]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "accession_id": datasets.Value("string"),
                    "pmid": datasets.Value("string"),

                    "introduction": datasets.Value("string"),
                    "methods": datasets.Value("string"),
                    "results": datasets.Value("string"),
                    "discussion": datasets.Value("string"),
                    "conclusion": datasets.Value("string"),

                    "front": datasets.Value("string"),
                    "body": datasets.Value("string"),
                    "back": datasets.Value("string"),

                    "license": datasets.Value("string"),
                    "retracted": datasets.Value("string"),
                    "last_updated": datasets.Value("string"),
                    "citation": datasets.Value("string"),
                    "package_file": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[LanguageModeling(text_column="content")],
        )

    def _split_generators(self, dl_manager):

        incremental_paths = {
            "incremental_file_lists": [],
            "incremental_archives": []
        }

        baseline_package_list = dl_manager.download(f"{_URL_ROOT}oa_file_list.csv")

        baseline_file_lists = []
        baseline_archives = []
        for subset in self.config.subsets:
            url = _URL.format(subset=_SUBSETS[subset])
            basename = f"{_SUBSETS[subset]}_xml."
            # Baselines: the non-commercial subset has no PMC000xxxxxx baseline
            baselines = [f"PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}" for i in range(10) if (subset != "non_commercial" or i > 0)]

            for baseline in baselines:
                baseline_file_list_url = f"{url}{basename}{baseline}.filelist.csv"
                baseline_archive_url = f"{url}{basename}{baseline}.tar.gz"
                baseline_file_list = dl_manager.download(baseline_file_list_url)
                baseline_archive = dl_manager.download(baseline_archive_url)

                baseline_file_lists.append(baseline_file_list)
                baseline_archives.append(baseline_archive)

            # Incrementals: some of their articles are already in the main parts (updates?)
            # Need to find a way to add them to the dataset without duplicating the articles.
            # Also, adding them means that each new day the dataset is loaded, the whole dataset is recreated.
            date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
            incremental_dates = [
                (datetime.date.fromisoformat(_BASELINE_DATE) + datetime.timedelta(days=i + 1)).isoformat()
                for i in range(date_delta.days)
            ]
            incrementals = [f"incr.{date}" for date in incremental_dates]
            for incremental in incrementals:
                incremental_file_list_url = f"{url}{basename}{incremental}.filelist.csv"
                incremental_archive_url = f"{url}{basename}{incremental}.tar.gz"
                try:
                    incremental_file_list = dl_manager.download(incremental_file_list_url)
                    incremental_archive = dl_manager.download(incremental_archive_url)
                except FileNotFoundError:  # Some increments might not exist
                    continue
                incremental_paths["incremental_file_lists"].append(incremental_file_list)
                incremental_paths["incremental_archives"].append(incremental_archive)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "baseline_file_lists": baseline_file_lists,
                    "baseline_archives": [dl_manager.iter_archive(archive) for archive in baseline_archives],
                    "baseline_package_list": baseline_package_list,
                    "incremental_file_lists": incremental_paths["incremental_file_lists"],
                    "incremental_archives": [dl_manager.iter_archive(archive) for archive in incremental_paths["incremental_archives"]],
                },
            ),
        ]

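    # For illustration, the incremental packages probed above follow the baseline naming scheme,
    # e.g. oa_comm_xml.incr.2022-11-19.filelist.csv and .tar.gz, one per day after _BASELINE_DATE;
    # days with no published increment are simply skipped.
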
    def _generate_examples(self, baseline_file_lists, baseline_archives, baseline_package_list, incremental_file_lists, incremental_archives):
        # Load the file list mapping each PMC article to its package folder (with media and graphics)
        oa_package_list = pd.read_csv(baseline_package_list, index_col="Accession ID")
        oa_package_list = oa_package_list[["File"]]
        oa_package_list.sort_index(inplace=True)
        processed_ids = set()

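        # Based on the fields read below, each per-archive filelist CSV is expected to provide
        # at least: AccessionID, Article File, PMID, License, LastUpdated (YYYY-MM-DD HH:MM:SS),
        # Retracted and Article Citation; the join with oa_file_list.csv adds the package "File" path.
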
        # Incrementals
        if incremental_file_lists:
            for incremental_file_list, incremental_archive in zip(incremental_file_lists[::-1], incremental_archives[::-1]):
                try:
                    incrementals = pd.read_csv(incremental_file_list, index_col="AccessionID")
                except FileNotFoundError:  # File not found can happen here in streaming mode
                    continue
                incrementals = incrementals.join(oa_package_list).reset_index().set_index("Article File")
                incrementals.File = incrementals.File.fillna("")
                incrementals = incrementals.to_dict(orient="index")

                for path, file in incremental_archive:
                    data = incrementals.pop(path)
                    pmcid = data["AccessionID"]
                    if pmcid in processed_ids:
                        continue
                    content = file.read()
                    try:
                        text = content.decode("utf-8").strip()
                    except UnicodeDecodeError:
                        text = content.decode("latin-1").strip()
                    text = clean_raw(text)
                    try:
                        article_tree = etree.ElementTree(etree.fromstring(text))
                    except etree.XMLSyntaxError:  # In some files, the XML is broken
                        continue

                    content_d = construct_datadict(article_tree)
                    data = {
                        "introduction": "\n".join(content_d["introduction"]),
                        "methods": "\n".join(content_d["methods"]),
                        "results": "\n".join(content_d["results"]),
                        "discussion": "\n".join(content_d["discussion"]),
                        "conclusion": "\n".join(content_d["conclusion"]),
                        "front": "\n".join(content_d["front"]),
                        "body": "\n".join(content_d["body"]),
                        "back": "\n".join(content_d["back"]),
                        "pmid": data["PMID"],
                        "accession_id": pmcid,
                        "license": data["License"],
                        "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                        "retracted": data["Retracted"],
                        "citation": data["Article Citation"],
                        "package_file": data["File"],
                    }
                    processed_ids.add(pmcid)
                    yield pmcid, data

        # Baselines
        for baseline_file_list, baseline_archive in zip(baseline_file_lists, baseline_archives):
            baselines = pd.read_csv(baseline_file_list, index_col="AccessionID")
            baselines = baselines.join(oa_package_list).reset_index().set_index("Article File")
            baselines.File = baselines.File.fillna("")
            baselines = baselines.to_dict(orient="index")

            for path, file in baseline_archive:
                data = baselines.pop(path)
                pmcid = data["AccessionID"]
                if pmcid in processed_ids:
                    continue
                content = file.read()
                try:
                    text = content.decode("utf-8").strip()
                except UnicodeDecodeError:
                    text = content.decode("latin-1").strip()
                text = clean_raw(text)
                try:
                    article_tree = etree.ElementTree(etree.fromstring(text))
                except etree.XMLSyntaxError:  # In some files, the XML is broken
                    continue

                content_d = construct_datadict(article_tree)
                data = {
                    "introduction": "\n".join(content_d["introduction"]),
                    "methods": "\n".join(content_d["methods"]),
                    "results": "\n".join(content_d["results"]),
                    "discussion": "\n".join(content_d["discussion"]),
                    "conclusion": "\n".join(content_d["conclusion"]),
                    "front": "\n".join(content_d["front"]),
                    "body": "\n".join(content_d["body"]),
                    "back": "\n".join(content_d["back"]),
                    "pmid": data["PMID"],
                    "accession_id": pmcid,
                    "license": data["License"],
                    "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                    "retracted": data["Retracted"],
                    "citation": data["Article Citation"],
                    "package_file": data["File"],
                }
                processed_ids.add(pmcid)
                yield pmcid, data