File size: 4,941 Bytes
aaf5555 256b18b 30d7412 aaf5555 30d7412 477bf25 30d7412 1f5b8cf 30d7412 aaf5555 30d7412 1f5b8cf 572664c 112785a 30d7412 aaf5555 cc36cbe aaf5555 30d7412 c9f41b3 89991b9 7742117 89991b9 d87f6f6 89991b9 8258d5e 8bc369b aaf5555 7742117 2244cf2 7742117 112785a 7cfc1a4 30d7412 7cfc1a4 f4eda52 7cfc1a4 4374727 aaf5555 cfd6fff 30d7412 4374727 30d7412 eb71e30 fc74df6 112785a 30d7412 112785a 30d7412 e2ab970 112785a 30d7412 aaf5555 17d7c85 599a10f ab8afac d10ffea 17d7c85 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 |
import json
import os
import tempfile
import zipfile

import datasets
import pandas as pd
from datasets import (
    BuilderConfig,
    ClassLabel,
    DatasetInfo,
    DownloadManager,
    Features,
    GeneratorBasedBuilder,
    Image,
    Sequence,
    SplitGenerator,
    Value,
    Version,
)
# Human-readable summary rendered on the dataset card; passed to DatasetInfo below.
_DESCRIPTION = """\
This dataset is curated from the original “The MultiCaRe Dataset” to focus on the chest tuberculosis patients and can be used to develop algorithms of the segmentation of chest CT images and the classification of tuberculosis positive or control.
"""
# Citation for the upstream MultiCaRe dataset on Zenodo; passed to DatasetInfo below.
_CITATION = """\
Nievas Offidani, M. and Delrieux, C. (2023) “The MultiCaRe Dataset: A Multimodal Case Report Dataset with Clinical Cases, Labeled Images and Captions from Open Access PMC Articles”. Zenodo. doi: 10.5281/zenodo.10079370.
"""
class TuberculosisDataset(GeneratorBasedBuilder):
    """Builder for a chest-tuberculosis subset of "The MultiCaRe Dataset".

    Joins per-case clinical text (``cases.csv``) with article-level keywords
    and per-case image captions (two JSON files), and pairs each case with
    one image taken from a downloaded zip archive.
    """

    BUILDER_CONFIGS = [
        BuilderConfig(name="tuberculosis_dataset", version=Version("1.0.0"))
    ]

    def _info(self):
        """Declare the dataset schema, homepage and citation."""
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features({
                "case_id": Value("string"),
                "gender": Value("string"),
                "age": Value("int8"),
                "case_text": Value("string"),
                "keywords": Value("string"),
                "image_file": Image(),  # stored as a path; decoded lazily by `datasets`
                "caption": Value("string"),
            }),
            supervised_keys=None,
            homepage="https://zenodo.org/records/10079370",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three metadata files and the image archive.

        Returns a single TRAIN split; the downloaded local paths are passed to
        ``_generate_examples`` as keyword arguments (the dict keys below must
        match that method's parameter names).
        """
        base_url = "https://raw.githubusercontent.com/zhankai-ye/tuberculosis_dataset/main/"
        urls = {
            "cases_csv": f"{base_url}cases.csv",
            # Pinned to a specific commit rather than `main`.
            "keywords_json": "https://raw.githubusercontent.com/zhankai-ye/tuberculosis_dataset/a774776663fe4ce5e960f502dc337b0e77451ca7/article_metadata.json",
            "caption_json": f"{base_url}image_metadata.json",
            "images_zip": "https://github.com/zhankai-ye/tuberculosis_dataset/raw/main/images/PMC.zip",
        }
        downloaded_files = dl_manager.download_and_extract(urls)
        return [
            SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=downloaded_files),
        ]

    def _generate_examples(self, cases_csv, keywords_json, caption_json, images_zip):
        """Yield ``(index, example)`` pairs after joining the metadata sources.

        Args:
            cases_csv: local path to ``cases.csv`` (one row per clinical case).
            keywords_json: local path to ``article_metadata.json`` — a single
                JSON document containing a list of article records.
            caption_json: local path to ``image_metadata.json`` — JSON Lines,
                one record per line.
            images_zip: local path to the image archive: either a zip file or
                a directory (if the download manager already extracted it).
        """
        # Drop cases with a missing age so the int8 cast below cannot fail on NaN.
        cases_df = pd.read_csv(cases_csv)
        cases_df.dropna(subset=['age'], inplace=True)

        # Article keywords: one JSON array; flatten lists to a comma-separated string.
        # Fix: explicit encoding — the default is platform-dependent.
        with open(keywords_json, 'r', encoding='utf-8') as f:
            keywords_json_data = json.load(f)
        keywords = pd.json_normalize(keywords_json_data)
        keywords['keywords'] = keywords['keywords'].apply(
            lambda x: ', '.join(x) if isinstance(x, list) else x
        )

        # Image captions: JSON Lines format, so parse line by line.
        with open(caption_json, 'r', encoding='utf-8') as f:
            caption_json_data = [json.loads(line) for line in f]
        caption = pd.json_normalize(caption_json_data)

        # Left-join keywords (per article) and captions (per case) onto the cases.
        merged_df = pd.merge(cases_df, keywords[['pmcid', 'keywords']], on='pmcid', how='left')
        merged_df = pd.merge(merged_df, caption[['case_id', 'caption']], on='case_id', how='left')
        # Replace the NaN produced by the left joins with None so missing
        # values become proper nulls in the emitted examples.
        merged_df = merged_df.where(pd.notnull(merged_df), None)
        merged_df['age'] = merged_df['age'].astype('int8')

        # Map pmcid -> image path.
        # NOTE(review): the map is keyed by the file-name prefix before the
        # first '_'; this assumes the CSV 'pmcid' column matches that prefix
        # exactly (same type and format) — verify, otherwise every lookup
        # silently returns None.
        image_file_paths = self._prepare_image_file_paths(images_zip)

        for idx, row in merged_df.iterrows():
            yield idx, {
                "case_id": row["case_id"],
                "gender": row["gender"],
                "age": int(row["age"]),
                "case_text": row["case_text"],
                "keywords": row["keywords"],
                "image_file": image_file_paths.get(row["pmcid"]),
                "caption": row["caption"],
            }

    def _prepare_image_file_paths(self, images_zip_path):
        """Map each file-name prefix (text before the first ``_``) to one image path.

        Accepts either a directory (already extracted) or a zip file, which is
        extracted to a fresh temporary directory. Only the first file found per
        prefix is kept.

        Raises:
            ValueError: if the path is neither a directory nor a zip file.
        """
        image_file_paths = {}
        if os.path.isdir(images_zip_path):
            base_path = images_zip_path
        elif os.path.isfile(images_zip_path) and zipfile.is_zipfile(images_zip_path):
            # Fix: extract into a unique temporary directory instead of a fixed
            # relative "temp_images" path, which could collide with stale data
            # from a previous run or with a concurrent build.
            base_path = tempfile.mkdtemp(prefix="tb_images_")
            with zipfile.ZipFile(images_zip_path, 'r') as zip_ref:
                zip_ref.extractall(base_path)
        else:
            raise ValueError("images_zip_path must be a directory or a zip file")

        for root, _, files in os.walk(base_path):
            for file in files:
                key = file.split('_')[0]
                # setdefault keeps the first path seen for each prefix.
                image_file_paths.setdefault(key, os.path.join(root, file))
        return image_file_paths
|