Datasets:
Tasks:
Image Classification
Sub-tasks:
multi-class-image-classification
Languages:
English
Size:
100K<n<1M
ArXiv:
License:
File size: 5,010 Bytes
62ec5aa 3afeebb 62ec5aa 3afeebb 5c7307b 16efcee 5c7307b 3dcb747 3afeebb 62ec5aa 3afeebb 62ec5aa 3afeebb 62ec5aa 3afeebb 5c7307b 62ec5aa e062905 5c7307b 62ec5aa 5c7307b 62ec5aa 5c7307b 62ec5aa 3afeebb 527c8a6 5c7307b 3afeebb 62ec5aa 527c8a6 3afeebb b741c32 1d1d19b 3afeebb 5c7307b 7c4579a 3dcb747 5c7307b 814dcde 7c4579a 62ec5aa 7c4579a 7376c1e 7c4579a 3dcb747 7376c1e 4a804e2 5cddd15 7c4579a 58fcc18 7c4579a 217a7da 7c4579a 62ec5aa 7c4579a 7e19fce 62ec5aa 7e19fce 7c4579a 5c7307b 8b7ba21 62ec5aa ddf25eb 8b7ba21 62ec5aa 4d201e1 ddf25eb b741c32 ddf25eb 3afeebb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 |
# Copyright 2022 Cristóbal Alcázar
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NIH Chest X-ray Dataset"""
import os
import datasets
from datasets.tasks import ImageClassification
from requests import get
from pandas import read_csv
# Module-level logger, following the ``datasets`` library convention.
logger = datasets.logging.get_logger(__name__)
# BibTeX citation for the ChestX-ray8 paper (Wang et al., CVPR 2017) that
# introduced this dataset.
_CITATION = """\
@inproceedings{Wang_2017,
doi = {10.1109/cvpr.2017.369},
url = {https://doi.org/10.1109%2Fcvpr.2017.369},
year = 2017,
month = {jul},
publisher = {{IEEE}
},
author = {Xiaosong Wang and Yifan Peng and Le Lu and Zhiyong Lu and Mohammadhadi Bagheri and Ronald M. Summers},
title = {{ChestX}-Ray8: Hospital-Scale Chest X-Ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases},
booktitle = {2017 {IEEE} Conference on Computer Vision and Pattern Recognition ({CVPR})}
}
"""
# Short human-readable summary shown on the dataset card.
_DESCRIPTION = """\
The NIH Chest X-ray dataset consists of 100,000 de-identified images of chest x-rays. The images are in PNG format.
The data is provided by the NIH Clinical Center and is available through the NIH download site: https://nihcc.app.box.com/v/ChestXray-NIHCC
"""
_HOMEPAGE = "https://nihcc.app.box.com/v/chestxray-nihcc"
_REPO = "https://huggingface.co/datasets/alkzar90/NIH-Chest-X-ray-dataset/resolve/main/data"
_IMAGE_URLS = [
f"{_REPO}/images/images_001.zip",
f"{_repo}/images/images_003.zip",
f"{_REPO}/images/images_004.zip",
f"{_REPO}/images/images_005.zip"
#'https://huggingface.co/datasets/alkzar90/NIH-Chest-X-ray-dataset/resolve/main/dummy/0.0.0/images_001.tar.gz',
#'https://huggingface.co/datasets/alkzar90/NIH-Chest-X-ray-dataset/resolve/main/dummy/0.0.0/images_002.tar.gz'
]
# All remote files needed to build the dataset: the official split lists,
# the label CSV, and the image archives.
_URLS = dict(
    train_val_list=f"{_REPO}/train_val_list.txt",
    test_list=f"{_REPO}/test_list.txt",
    labels=f"{_REPO}/Data_Entry_2017_v2020.csv",
    image_urls=_IMAGE_URLS,
)
_LABEL2IDX = {"No Finding": 0,
"Atelectasis": 1,
"Cardiomegaly": 2,
"Effusion": 3,
"Infiltration": 4,
"Mass": 5,
"Nodule": 6,
"Pneumonia": 7,
"Pneumothorax": 8,
"Consolidation": 9,
"Edema": 10,
"Emphysema": 11,
"Fibrosis": 12,
"Pleural_Thickening": 13,
"Hernia": 14}
_NAMES = list(_LABEL2IDX.keys())
class XChest(datasets.GeneratorBasedBuilder):
    """NIH Image Chest X-ray dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata: features, supervised keys, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image_file_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    # A single x-ray can carry several findings, so labels
                    # are a Sequence of ClassLabel, not a single label.
                    "labels": datasets.features.Sequence(
                        datasets.features.ClassLabel(
                            num_classes=len(_NAMES), names=_NAMES
                        )
                    ),
                }
            ),
            supervised_keys=("image", "labels"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the image archives and assign images to train/test.

        Images listed in the official ``train_val_list.txt`` go to the
        TRAIN split; every other image goes to the TEST split.
        """
        # Fetch the official train/val file-name list and turn it into a
        # set for O(1) membership tests per image.
        logger.info("Downloading the train_val_list image names")
        train_val_list = get(_URLS["train_val_list"]).iter_lines()
        train_val_list = set(line.decode("UTF8") for line in train_val_list)
        logger.info(f"Check train_val_list: {train_val_list}")

        train_files = []
        test_files = []

        # Download and extract the zip archives with the images.
        data_files = dl_manager.download_and_extract(_URLS["image_urls"])

        # Walk every extracted batch and route each image to its split.
        for batch in data_files:
            logger.info(f"Batch for data_files: {batch}")
            for img in dl_manager.iter_files(batch):
                # os.path.basename is portable across path separators,
                # unlike splitting on '/'.
                if os.path.basename(img) in train_val_list:
                    train_files.append(img)
                else:
                    test_files.append(img)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": iter(train_files)},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files": iter(test_files)},
            ),
        ]

    def _generate_examples(self, files):
        """Yield ``(key, example)`` pairs with image path, image and labels."""
        # CSV mapping "Image Index" (file name) -> pipe-separated
        # "Finding Labels" for that image.
        label_csv = read_csv(_URLS["labels"])
        for i, path in enumerate(files):
            file_name = os.path.basename(path)
            # Skip non-PNG files BEFORE the CSV lookup: a file name absent
            # from the CSV would make ``.values[0]`` raise IndexError.
            # (The original code did the lookup first.)
            if not file_name.endswith(".png"):
                continue
            image_labels = (
                label_csv[label_csv["Image Index"] == file_name]["Finding Labels"]
                .values[0]
                .split("|")
            )
            yield i, {
                "image_file_path": path,
                "image": path,
                "labels": image_labels,
            }
|