import io

import datasets
import h5py
import numpy as np
from PIL import Image
from datasets import DatasetInfo, Features, GeneratorBasedBuilder, Split, SplitGenerator
from huggingface_hub import HfFileSystem
class CustomConfig(datasets.BuilderConfig):
    """BuilderConfig whose name selects which variant of the dataset to load."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # The config name doubles as the variant: "full", "sample", "depth", or "seg".
        self.dataset_type = kwargs.get("name", "all")
_metadata_urls = {
    "train": "https://huggingface.co/datasets/XingjianLi/tomatotest/resolve/main/train.txt",
    "val": "https://huggingface.co/datasets/XingjianLi/tomatotest/resolve/main/val.txt",
}
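# Each split file is expected to list one sample per line: the basename of an
# .h5 member inside the tar archives, without its ".h5" extension (this is
# what _generate_examples matches against).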
class RGBSemanticDepthDataset(GeneratorBasedBuilder):
    """Stereo RGB, semantic/instance segmentation, and depth, stored as HDF5 files inside tar archives."""

    BUILDER_CONFIGS = [
        CustomConfig(name="full", version="1.0.0", description="load both segmentation and depth (all tar files, 160 GB)"),
        CustomConfig(name="sample", version="1.0.0", description="load both segmentation and depth (one tar file, 870 MB)"),
        CustomConfig(name="depth", version="1.0.0", description="only load depth (sample)"),
        CustomConfig(name="seg", version="1.0.0", description="only load segmentation (sample)"),
    ]
    BUILDER_CONFIG_CLASS = CustomConfig
    def _info(self):
        return DatasetInfo(
            features=Features({
                "left_rgb": datasets.Image(),
                "right_rgb": datasets.Image(),
                "left_semantic": datasets.Image(),
                "left_instance": datasets.Image(),
                # Depth maps arrive as float32 numpy arrays; datasets.Image()
                # also accepts arrays and wraps them as single-channel images.
                "left_depth": datasets.Image(),
                "right_depth": datasets.Image(),
            })
        )
    def _h5_loader(self, bytes_stream, type_dataset):
        """Decode one HDF5 sample.

        Each .h5 file holds JPEG-encoded stereo RGB ("rgb_left"/"rgb_right"),
        a 3-channel segmentation map ("seg_left"), and float depth maps
        ("depth_left"/"depth_right"). Fields not requested by the config are
        returned as 1x1 uint8 placeholders.
        Reference: https://github.com/dwofk/fast-depth/blob/master/dataloaders/dataloader.py#L8-L13
        """
        f = io.BytesIO(bytes_stream)
        dummy = np.zeros((1, 1), dtype=np.uint8)
        with h5py.File(f, "r") as h5f:
            left_rgb = self._read_jpg(h5f["rgb_left"][:])
            if type_dataset == "depth":
                right_rgb = self._read_jpg(h5f["rgb_right"][:])
                left_depth = h5f["depth_left"][:].astype(np.float32)
                right_depth = h5f["depth_right"][:].astype(np.float32)
                return left_rgb, right_rgb, dummy, dummy, left_depth, right_depth
            elif type_dataset == "seg":
                seg = h5f["seg_left"][:]
                # Channel 2 holds the semantic class; channels 0 and 1 are the
                # low and high bytes of the 16-bit instance id. Cast before
                # combining so the uint8 arithmetic cannot overflow.
                left_semantic = seg[:, :, 2]
                left_instance = seg[:, :, 0].astype(np.int32) + seg[:, :, 1].astype(np.int32) * 256
                return left_rgb, dummy, left_semantic, left_instance, dummy, dummy
            else:  # "full" and "sample" load everything
                right_rgb = self._read_jpg(h5f["rgb_right"][:])
                seg = h5f["seg_left"][:]
                left_semantic = seg[:, :, 2]
                left_instance = seg[:, :, 0].astype(np.int32) + seg[:, :, 1].astype(np.int32) * 256
                left_depth = h5f["depth_left"][:].astype(np.float32)
                right_depth = h5f["depth_right"][:].astype(np.float32)
                return left_rgb, right_rgb, left_semantic, left_instance, left_depth, right_depth
    def _read_jpg(self, bytes_stream):
        """Decode a JPEG stored as a uint8 byte array inside the HDF5 file."""
        return Image.open(io.BytesIO(bytes_stream))
    def _split_generators(self, dl_manager):
        if self.config.dataset_type == "full":
            # Download every tar archive. Train/val membership is decided
            # later by the split files, so both splits see all archives.
            filenames = self._get_dataset_filenames()
        else:
            # All other configs load only the first (sample) archive.
            filenames = self._get_dataset_filenames()[:1]
        archives = dl_manager.download({"train": filenames, "val": filenames})
        split_metadata = dl_manager.download(_metadata_urls)
return [
SplitGenerator(
name=Split.TRAIN,
gen_kwargs={
"archives": [dl_manager.iter_archive(archive) for archive in archives["train"]],
"split_txt": split_metadata["train"]
},
),
SplitGenerator(
name=Split.VALIDATION,
gen_kwargs={
"archives": [dl_manager.iter_archive(archive) for archive in archives["val"]],
"split_txt": split_metadata["val"]
},
),
]
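    # Note: dl_manager.iter_archive streams (path-within-archive, file-object)
    # pairs from each tar file without extracting it to disk; the generator
    # below consumes exactly that interface.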
    def _generate_examples(self, archives, split_txt):
        with open(split_txt, encoding="utf-8") as split_f:
            all_splits = set(split_f.read().split("\n"))
        for archive in archives:
            for path, file in archive:
                # Keep only samples whose basename (minus the ".h5" suffix)
                # is listed in the split file.
                if path.split("/")[-1][:-3] not in all_splits:
                    continue
                left_rgb, right_rgb, left_semantic, left_instance, left_depth, right_depth = self._h5_loader(
                    file.read(), self.config.dataset_type
                )
                yield path, {
                    "left_rgb": left_rgb,
                    "right_rgb": right_rgb,
                    "left_semantic": left_semantic,
                    "left_instance": left_instance,
                    "left_depth": left_depth,
                    "right_depth": right_depth,
                }
    def _get_dataset_filenames(self):
        # List the tar archives under the repo's data/ directory and return
        # their repo-relative paths (e.g. "data/<archive>.tar"), which
        # dl_manager.download resolves against this dataset repository.
        fs = HfFileSystem()
        all_files = fs.ls("datasets/xingjianli/tomatotest/data")
        return sorted('/'.join(f['name'].split('/')[-2:]) for f in all_files)
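

# Minimal usage sketch (an assumption, not part of the loader itself): this
# script is assumed to live in the "XingjianLi/tomatotest" dataset repo, so
# the config names defined above can be requested through load_dataset.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("XingjianLi/tomatotest", "sample", split="train",
#                       trust_remote_code=True)
#     example = next(iter(ds))
#     example["left_rgb"]    # PIL image from the left camera
#     example["left_depth"]  # float depth map wrapped as a PIL image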