moukaii committed on
Commit 30d7412
1 Parent(s): 5f53757

Upload tuberculosis_dataset.py

Files changed (1)
  1. tuberculosis_dataset.py +101 -0
tuberculosis_dataset.py ADDED
@@ -0,0 +1,101 @@
+ from datasets import GeneratorBasedBuilder, DownloadManager, DatasetInfo, BuilderConfig, SplitGenerator, Version, Split
+ from datasets.features import Features, Value, Sequence, Image as ImageFeature
+ import pandas as pd
+ import json
+ from PIL import Image
+ import numpy as np
+
+ _DESCRIPTION = """\
+ This dataset is curated from the original “The MultiCaRe Dataset” to focus on chest tuberculosis patients. It can be used to develop algorithms for the segmentation of chest CT images and for the classification of cases as tuberculosis-positive or control.
+ """
+
+ _CITATION = """\
+ Nievas Offidani, M. and Delrieux, C. (2023) “The MultiCaRe Dataset: A Multimodal Case Report Dataset with Clinical Cases, Labeled Images and Captions from Open Access PMC Articles”. Zenodo. doi: 10.5281/zenodo.10079370.
+ """
+
+ class TuberculosisDataset(GeneratorBasedBuilder):
+     # Dataset configuration
+     BUILDER_CONFIGS = [
+         BuilderConfig(name="tuberculosis_dataset", version=Version("1.0.0"))
+     ]
+
+     def _info(self):
+         return DatasetInfo(
+             description=_DESCRIPTION,
+             features=Features({
+                 "case_id": Value("string"),
+                 "gender": Value("string"),
+                 "age": Value("int8"),
+                 "case_text": Value("string"),
+                 "keywords": Value("string"),
+                 # Images vary in size, so use the Image feature instead of a fixed-shape Array3D
+                 "image_arrays": Sequence(ImageFeature()),
+                 "caption": Value("string"),
+             }),
+             supervised_keys=None,
+             homepage="https://zenodo.org/api/records/10079370/files-archive",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager):
+         urls = {
+             "cases_csv": "https://raw.githubusercontent.com/zhankai-ye/tuberculosis_dataset/main/cases.csv",
+             "keywords_json": "https://raw.githubusercontent.com/zhankai-ye/tuberculosis_dataset/main/article_metadata.json",
+             "caption_json": "https://raw.githubusercontent.com/zhankai-ye/tuberculosis_dataset/main/image_metadata.json",
+             "image_filenames_json": "https://raw.githubusercontent.com/zhankai-ye/tuberculosis_dataset/main/image_filenames.json",
+             "images_zip": "https://github.com/zhankai-ye/tuberculosis_dataset/raw/main/images/PMC.zip",
+         }
+
+         downloaded_files = dl_manager.download_and_extract(urls)
+
+         return [
+             SplitGenerator(name=Split.TRAIN, gen_kwargs=downloaded_files),
+         ]
+
+     def _generate_examples(self, cases_csv, keywords_json, caption_json, image_filenames_json, images_zip):
+         # Load the tabular metadata
+         cases_df = pd.read_csv(cases_csv)
+         with open(keywords_json, 'r') as f:
+             keywords = pd.json_normalize(json.load(f))
+         with open(caption_json, 'r') as f:
+             caption = pd.json_normalize(json.load(f))
+         with open(image_filenames_json, 'r') as f:
+             image_filenames = json.load(f)
+
+         # Preprocess and merge dataframes
+         cases_df.dropna(subset=['age'], inplace=True)
+         keywords['keywords'] = keywords['keywords'].apply(lambda x: ', '.join(x) if isinstance(x, list) else x)
+         merged_df = pd.merge(cases_df, keywords[['pmcid', 'keywords']], on='pmcid', how='left')
+         merged_df = pd.merge(merged_df, caption[['case_id', 'caption']], on='case_id', how='left')
+         merged_df = merged_df.where(pd.notnull(merged_df), None)
+         merged_df['age'] = merged_df['age'].astype('int8')
+
+         # download_and_extract has already unpacked the archive, so images_zip is the extracted directory
+         image_arrays = self._process_images(images_zip, image_filenames)
+
+         for idx, row in merged_df.iterrows():
+             record = {
+                 "case_id": row["case_id"],
+                 "gender": row["gender"],
+                 "age": row["age"],
+                 "case_text": row["case_text"],
+                 "keywords": row["keywords"],
+                 "image_arrays": image_arrays.get(row["pmcid"], []),
+                 "caption": row["caption"],
+             }
+             yield idx, record
+
+     def _process_images(self, extracted_images_dir, image_filenames):
+         # Map each pmcid to the list of image arrays belonging to that case
+         image_arrays = {}
+         for pmcid, filenames in image_filenames.items():
+             for filename in filenames:
+                 file_path = f"{extracted_images_dir}/{filename}"
+                 with Image.open(file_path) as img:
+                     img_array = np.array(img)
+                 image_arrays.setdefault(pmcid, []).append(img_array)
+         return image_arrays