|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import json |
|
import os |
|
import datasets |
|
import pickle |
|
import gzip |
|
|
|
|
|
# BibTeX entry for citing the dataset paper (Keller et al., CVPR 2024).
# NOTE: the blank lines below are inside the string literal and are kept as-is.
_CITATION = """\

@inproceedings{Keller:CVPR:2024,

title = {{HIT}: Estimating Internal Human Implicit Tissues from the Body Surface},

author = {Keller, Marilyn and Arora, Vaibhav and Dakri, Abdelmouttaleb and Chandhok, Shivam and

Machann, Jürgen and Fritsche, Andreas and Black, Michael J. and Pujades, Sergi},

booktitle = {Proceedings IEEE/CVF Conf.~on Computer Vision and Pattern Recognition (CVPR)},

month = jun,

year = {2024},

month_numeric = {6}}

"""


# Human-readable summary shown on the dataset card / in DatasetInfo.
_DESCRIPTION = """\

The HIT dataset is a structured dataset of paired observations of body's inner tissues and the body surface. More concretely, it is a dataset of paired full-body volumetric segmented (bones, lean, and adipose tissue) MRI scans and SMPL meshes capturing the body surface shape for male (N=157) and female (N=241) subjects respectively. This is relevant for medicine, sports science, biomechanics, and computer graphics as it can ease the creation of personalized anatomic digital twins that model our bones, lean, and adipose tissue."""


# Project page for the dataset.
_HOMEPAGE = "https://hit.is.tue.mpg.de/"


# License terms are kept in the hub repository README rather than inlined here.
_LICENSE = "see https://huggingface.co/datasets/varora/HIT/blob/main/README.md"


# Root of the hub repository hosting the data files.
_BASE_URL = "https://huggingface.co/datasets/varora/HIT/tree/main"

# Repo-relative directories for the two per-gender subsets.
_PATHS = {

    "male": "/male",

    "female": "/female",

}
|
|
|
class HIT(datasets.GeneratorBasedBuilder):
    """Builder for the HIT dataset: paired segmented full-body MRI volumes and
    SMPL surface meshes.

    The builder config name selects the subject subset ("male" or "female");
    with no config selected, both subsets are combined into each split.
    One example corresponds to one subject.
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` describing one subject record."""
        features = datasets.Features(
            {
                "gender": datasets.Value("string"),
                # Per-subject segmentation volume; first axis is the (variable)
                # number of axial slices, each slice is 192x256.
                "mri_seg": datasets.Array3D(dtype="int32", shape=(None, 192, 256)),
                # Mapping from tissue-class name to its integer label in mri_seg.
                "mri_labels": {
                    "NO": datasets.Value("int32"),
                    "LT": datasets.Value("int32"),
                    "AT": datasets.Value("int32"),
                    "VAT": datasets.Value("int32"),
                    "BONE": datasets.Value("int32"),
                },
                "body_mask": datasets.Array3D(dtype="int64", shape=(None, 192, 256)),
                # Per-slice (x, y, z) voxel resolution and center coordinates.
                "resolution": datasets.Array2D(dtype="float32", shape=(None, 3)),
                "center": datasets.Array2D(dtype="float32", shape=(None, 3)),
                # SMPL body model fit: 6890 vertices is the fixed SMPL mesh size.
                "smpl_dict": {
                    "gender": datasets.Value("string"),
                    "verts_free": datasets.Array2D(dtype="float32", shape=(6890, 3)),
                    "verts": datasets.Array2D(dtype="float32", shape=(6890, 3)),
                    "faces": datasets.Array2D(dtype="uint32", shape=(None, 3)),
                    "pose": datasets.Sequence(datasets.Value("float32")),
                    "betas": datasets.Sequence(datasets.Value("float32")),
                    "trans": datasets.Sequence(datasets.Value("float32")),
                },
                # Body-contour point cloud, N points x (x, y, z).
                "body_cont_pc": datasets.Array2D(dtype="float32", shape=(None, 3)),
                "dataset_name": datasets.Value("string"),
                "subject_ID": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-subject archives and define the train/val/test splits.

        The repo-level ``hit_dataset.json`` index maps gender -> split -> list of
        file names; those names are turned into repo-relative paths and handed to
        the download manager.
        """
        splits = ["train", "val", "test"]

        # Config name selects the gender subset ("male"/"female"), or None when
        # the builder was instantiated without a config.
        gender = self.config.name
        print(f"Config: {gender}")

        file_structure_url = "hit_dataset.json"
        index_path = dl_manager.download_and_extract(file_structure_url)
        with open(index_path) as f:
            file_structure = json.load(f)

        if gender is not None:  # fixed: was the non-idiomatic `not gender is None`
            data_urls = {
                split: [
                    os.path.join(gender, split, filename)
                    for filename in file_structure[gender][split]
                ]
                for split in splits
            }
        else:
            # No config selected: merge both genders into every split.
            # (The previous code built a nested {gender: {split: [...]}} dict
            # here, which made the archive_paths['train'/'val'/'test'] lookups
            # below fail with a KeyError.)
            data_urls = {
                split: [
                    os.path.join(g, split, filename)
                    for g in ("male", "female")
                    for filename in file_structure[g][split]
                ]
                for split in splits
            }

        archive_paths = dl_manager.download(data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": archive_paths["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": archive_paths["val"],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": archive_paths["test"],
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(subject_ID, record)`` pairs from gzipped pickle files.

        Args:
            filepath: list of local paths to per-subject ``.gz`` pickle files.
            split: split name (unused; part of the GeneratorBasedBuilder contract).
        """
        for subject_path in filepath:
            # NOTE(security): pickle.load executes arbitrary code if the file is
            # tampered with; these files come from the official HIT hub repo.
            with gzip.open(subject_path, "rb") as f:
                data = pickle.load(f)
            # The subject ID doubles as the unique example key.
            yield data["subject_ID"], data