# Tuberculosis_Dataset / Tuberculosis_Dataset.py
# Hugging Face dataset loading script (repo: moukaii/Tuberculosis_Dataset).
from datasets import GeneratorBasedBuilder, DownloadManager, DatasetInfo, Features, Value, Sequence, ClassLabel, Image, BuilderConfig, SplitGenerator, Version
import datasets
import pandas as pd
import json
import zipfile
import os
# Human-readable dataset summary, surfaced via DatasetInfo.description below.
_DESCRIPTION = """\
This dataset is curated from the original “The MultiCaRe Dataset” to focus on the chest tuberculosis patients and can be used to develop algorithms of the segmentation of chest CT images and the classification of tuberculosis positive or control.
"""
# Citation for the upstream source dataset, surfaced via DatasetInfo.citation.
_CITATION = """\
Nievas Offidani, M. and Delrieux, C. (2023) “The MultiCaRe Dataset: A Multimodal Case Report Dataset with Clinical Cases, Labeled Images and Captions from Open Access PMC Articles”. Zenodo. doi: 10.5281/zenodo.10079370.
"""
class TuberculosisDataset(GeneratorBasedBuilder):
    """Builder for the chest-tuberculosis subset of "The MultiCaRe Dataset".

    Joins per-case clinical text (CSV), article keywords (JSON), image
    captions (JSON-lines) and chest images (zip archive) into a single
    TRAIN split of examples keyed by case id.
    """

    BUILDER_CONFIGS = [
        BuilderConfig(name="tuberculosis_dataset", version=Version("1.0.0"))
    ]

    def _info(self):
        """Declare the dataset schema, homepage and citation."""
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features({
                "case_id": Value("string"),
                "gender": Value("string"),
                "age": Value("int8"),
                "case_text": Value("string"),
                "keywords": Value("string"),
                # Images are stored as file paths; the `datasets` Image
                # feature decodes them lazily on access.
                "image_files": Sequence(Image()),
                "caption": Value("string"),
            }),
            supervised_keys=None,
            homepage="https://zenodo.org/api/records/10079370/files-archive",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the metadata files and image archive; single TRAIN split.

        The keys of `urls` become the keyword arguments of
        `_generate_examples`. `download_and_extract` yields local paths;
        the zip entry resolves to the directory it was extracted into
        (it is walked with `os.walk` downstream).
        """
        base_url = "https://raw.githubusercontent.com/zhankai-ye/tuberculosis_dataset/main/"
        urls = {
            "cases_csv": f"{base_url}cases.csv",
            "keywords_json": f"{base_url}article_metadata.json",
            "caption_json": f"{base_url}image_metadata.json",
            "images_zip": "https://github.com/zhankai-ye/tuberculosis_dataset/raw/main/images/PMC.zip"
        }
        downloaded_files = dl_manager.download_and_extract(urls)
        return [
            SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=downloaded_files),
        ]

    def _generate_examples(self, cases_csv, keywords_json, caption_json, images_zip):
        """Merge the three metadata sources on case id and yield examples.

        Args:
            cases_csv: path to the per-case CSV (case_id, gender, age, ...).
            keywords_json: path to a single JSON document of article metadata.
            caption_json: path to a JSON-lines file of per-image captions.
            images_zip: directory containing the extracted image archive.

        Yields:
            (index, example_dict) pairs matching the features in `_info`.
        """
        # Rows with a missing age are dropped so the int8 cast below succeeds.
        cases_df = pd.read_csv(cases_csv)
        cases_df.dropna(subset=['age'], inplace=True)

        # Article-level keywords: one JSON document; keyword lists are
        # flattened to a single comma-separated string to fit the schema.
        # encoding specified explicitly: the metadata contains non-ASCII text.
        with open(keywords_json, 'r', encoding='utf-8') as f:
            keywords_json_data = json.load(f)
        keywords = pd.json_normalize(keywords_json_data)
        keywords['keywords'] = keywords['keywords'].apply(
            lambda x: ', '.join(x) if isinstance(x, list) else x
        )

        # Image captions: JSON-lines, one record per line.
        with open(caption_json, 'r', encoding='utf-8') as f:
            caption_json_data = [json.loads(line) for line in f]
        caption = pd.json_normalize(caption_json_data)

        # Left-join keywords (keyed by pmcid) and captions (keyed by case_id)
        # onto the cases table; unmatched cells become NaN, then None.
        merged_df = pd.merge(cases_df, keywords[['pmcid', 'keywords']],
                             left_on='case_id', right_on='pmcid', how='left')
        merged_df = pd.merge(merged_df, caption[['case_id', 'caption']],
                             on='case_id', how='left')
        merged_df = merged_df.where(pd.notnull(merged_df), None)
        merged_df['age'] = merged_df['age'].astype('int8')

        # case_id -> list of image paths found in the extracted archive.
        image_file_paths = self._prepare_image_file_paths(images_zip)

        for idx, row in merged_df.iterrows():
            # Cases with no matching images get an empty list.
            image_files = image_file_paths.get(row["case_id"], [])
            yield idx, {
                "case_id": row["case_id"],
                "gender": row["gender"],
                "age": int(row["age"]),
                "case_text": row["case_text"],
                "keywords": row["keywords"],
                "image_files": image_files,
                "caption": row["caption"],
            }

    def _prepare_image_file_paths(self, images_zip_path):
        """Walk the extracted archive and group .jpg paths by case id.

        File names are assumed to look like "<case_id>_<suffix>.jpg", so the
        portion before the first underscore is used as the grouping key.

        Returns:
            dict mapping case id -> list of absolute/relative .jpg paths.
        """
        image_file_paths = {}
        for root, _, files in os.walk(images_zip_path):
            for file_name in files:  # renamed: `file` shadowed the builtin
                if file_name.endswith('.jpg'):
                    key = file_name.split('_')[0]
                    image_file_paths.setdefault(key, []).append(
                        os.path.join(root, file_name)
                    )
        return image_file_paths