Tuberculosis_Dataset / TubercuIosis_Dataset.py
import json
import os
import zipfile

import pandas as pd

import datasets
from datasets import (
    BuilderConfig,
    DatasetInfo,
    Features,
    GeneratorBasedBuilder,
    Image,
    SplitGenerator,
    Value,
    Version,
)
_DESCRIPTION = """\
This dataset is curated from the original “The MultiCaRe Dataset” and focuses on chest tuberculosis patients. It can be used to develop algorithms for segmenting chest CT images and for classifying cases as tuberculosis-positive or control.
"""
_CITATION = """\
Nievas Offidani, M. and Delrieux, C. (2023) “The MultiCaRe Dataset: A Multimodal Case Report Dataset with Clinical Cases, Labeled Images and Captions from Open Access PMC Articles”. Zenodo. doi: 10.5281/zenodo.10079370.
"""
class TuberculosisDataset(GeneratorBasedBuilder):
    # Single default configuration; its name doubles as the dataset's config id.
    BUILDER_CONFIGS = [
        BuilderConfig(name="tuberculosis_dataset", version=Version("1.0.0"))
    ]
    def _info(self):
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features({
                "case_id": Value("string"),
                "gender": Value("string"),
                "age": Value("int8"),
                "case_text": Value("string"),
                "keywords": Value("string"),
                "image_file": Image(),  # stores the file path; decoded to an image on access
                "caption": Value("string"),
            }),
            supervised_keys=None,
            homepage="https://zenodo.org/records/10079370",
            citation=_CITATION,
        )
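
    # A hypothetical example of one record under this schema (all values are
    # illustrative, not taken from the dataset):
    #     {
    #         "case_id": "PMC1234567_case_1",   # made-up id format
    #         "gender": "F",
    #         "age": 34,
    #         "case_text": "A 34-year-old woman presented with ...",
    #         "keywords": "tuberculosis, chest CT",
    #         "image_file": "<decoded PIL image>",
    #         "caption": "Axial chest CT showing ...",
    #     }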
    def _split_generators(self, dl_manager):
        base_url = "https://raw.githubusercontent.com/zhankai-ye/tuberculosis_dataset/main/"
        urls = {
            "cases_csv": f"{base_url}cases.csv",
            "keywords_json": "https://raw.githubusercontent.com/zhankai-ye/tuberculosis_dataset/a774776663fe4ce5e960f502dc337b0e77451ca7/article_metadata.json",
            "caption_json": f"{base_url}image_metadata.json",
            "images_zip": "https://github.com/zhankai-ye/tuberculosis_dataset/raw/main/images/PMC.zip",
        }
        downloaded_files = dl_manager.download_and_extract(urls)
        return [
            SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=downloaded_files),
        ]
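
    # Note: when `download_and_extract` receives a dict, it returns a dict with
    # the same keys mapped to local paths (the zip entry becomes an extracted
    # directory). That dict can be passed directly as `gen_kwargs` because its
    # keys match the parameter names of `_generate_examples` below.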
    def _generate_examples(self, cases_csv, keywords_json, caption_json, images_zip):
        # Load the case-level CSV and drop rows with a missing age, since the
        # schema declares age as a non-nullable int8.
        cases_df = pd.read_csv(cases_csv)
        cases_df.dropna(subset=['age'], inplace=True)
        # Load the article metadata JSON and flatten each article's keyword
        # list into a single comma-separated string.
        with open(keywords_json, 'r') as f:
            keywords_json_data = json.load(f)
        keywords = pd.json_normalize(keywords_json_data)
        keywords['keywords'] = keywords['keywords'].apply(
            lambda x: ', '.join(x) if isinstance(x, list) else x
        )
        # Load the image metadata, stored as JSON Lines (one object per line).
        caption_json_data = []
        with open(caption_json, 'r') as f:
            for line in f:
                caption_json_data.append(json.loads(line))
        caption = pd.json_normalize(caption_json_data)
        # Merge keywords (by article pmcid) and captions (by case_id) onto the
        # case table, then replace remaining NaNs with None so Arrow treats
        # them as nulls.
        merged_df = pd.merge(cases_df, keywords[['pmcid', 'keywords']], on='pmcid', how='left')
        merged_df = pd.merge(merged_df, caption[['case_id', 'caption']], on='case_id', how='left')
        merged_df = merged_df.where(pd.notnull(merged_df), None)
        merged_df['age'] = merged_df['age'].astype('int8')
        # Map each pmcid to one image file extracted from the zip.
        image_file_paths = self._prepare_image_file_paths(images_zip)
        # Yield one example per case; cases without an image get None.
        for idx, row in merged_df.iterrows():
            image_file = image_file_paths.get(row["pmcid"], None)
            yield idx, {
                "case_id": row["case_id"],
                "gender": row["gender"],
                "age": int(row["age"]),
                "case_text": row["case_text"],
                "keywords": row["keywords"],
                "image_file": image_file,
                "caption": row["caption"],
            }
    def _prepare_image_file_paths(self, images_zip_path):
        # Build a pmcid -> image path map. Filenames are expected to start with
        # the PMC id followed by an underscore (e.g. PMC1234567_fig1.jpg), so
        # only the first image found per article is kept.
        image_file_paths = {}
        temp_dir = "temp_images"
        # `download_and_extract` usually hands us the already-extracted
        # directory, but accept a raw zip file as well.
        if os.path.isdir(images_zip_path):
            base_path = images_zip_path
        elif os.path.isfile(images_zip_path) and zipfile.is_zipfile(images_zip_path):
            with zipfile.ZipFile(images_zip_path, 'r') as zip_ref:
                zip_ref.extractall(temp_dir)
            base_path = temp_dir
        else:
            raise ValueError("images_zip_path must be a directory or a zip file")
        # Walk the extracted tree and record the first file seen for each pmcid.
        for root, _, files in os.walk(base_path):
            for file in files:
                key = file.split('_')[0]
                if key not in image_file_paths:
                    image_file_paths[key] = os.path.join(root, file)
        return image_file_paths
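
# A minimal usage sketch, assuming the script is published on the Hugging Face
# Hub; the repo id below is inferred from the file header and may differ.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset(
        "moukaii/Tuberculosis_Dataset",  # assumed Hub repo id
        split="train",
        trust_remote_code=True,  # required for script-based datasets
    )
    print(ds[0]["case_id"], ds[0]["age"])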