import datasets
import json
import numpy
import torch
# Human-readable summary displayed on the Hugging Face Hub dataset page.
_DESCRIPTION = """\
Dataset of pre-processed samples from a small portion of the \
Waymo Open Motion Data for our risk-biased prediction task.
"""
# BibTeX citation for the paper this dataset accompanies (RAP, CoRL 2022).
_CITATION = """\
@InProceedings{NiMe:2022,
author = {Haruki Nishimura, Jean Mercat, Blake Wulfe, Rowan McAllister},
title = {RAP: Risk-Aware Prediction for Robust Planning},
booktitle = {Proceedings of the 2022 IEEE International Conference on Robot Learning (CoRL)},
month = {December},
year = {2022},
address = {Grafton Road, Auckland CBD, Auckland 1010},
url = {},
}
"""
# Base URL of the dataset repository on the Hugging Face Hub.
_URL = "https://huggingface.co/datasets/jmercat/risk_biased_dataset/resolve/main/"
# Split name -> download URL. Only a "test" split is published.
_URLS = {
    "test": _URL + "data.json",
}
class RiskBiasedDataset(datasets.GeneratorBasedBuilder):
    """Dataset of pre-processed samples from a portion of the
    Waymo Open Motion Data for the risk-biased prediction task."""

    VERSION = datasets.Version("0.0.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="risk_biased_dataset",
            version=VERSION,
            description=(
                "Dataset of pre-processed samples from a portion of the "
                "Waymo Open Motion Data for the risk-biased prediction task."
            ),
        ),
    ]
    DEFAULT_CONFIG_NAME = "risk_biased_dataset"

    # Single source of truth for every field stored in data.json:
    # field name -> (value dtype, number of nested Sequence levels).
    # Used both to declare the features in _info and to decode the raw
    # JSON lists in _generate_examples, so the two can never drift apart.
    _FIELD_SPEC = {
        "x": ("float32", 4),
        "mask_x": ("bool", 3),
        "y": ("float32", 4),
        "mask_y": ("bool", 3),
        "mask_loss": ("bool", 3),
        "map_data": ("float32", 4),
        "mask_map": ("bool", 3),
        "offset": ("float32", 3),
        "x_ego": ("float32", 4),
        "y_ego": ("float32", 4),
    }

    @staticmethod
    def _nested_sequence(dtype, depth):
        """Return a feature made of `depth` nested Sequences of Value(`dtype`)."""
        feature = datasets.Value(dtype)
        for _ in range(depth):
            feature = datasets.Sequence(feature)
        return feature

    def _info(self):
        """Declare the dataset metadata and the per-example feature schema."""
        features = datasets.Features(
            {
                key: self._nested_sequence(dtype, depth)
                for key, (dtype, depth) in self._FIELD_SPEC.items()
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="https://sites.google.com/view/corl-risk/home",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download data.json and expose it as the single "test" split."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"], "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples.

        Args:
            filepath: local path of the downloaded JSON file.
            split: split name; only "test" is supported.

        Raises:
            ValueError: if `split` is not "test".
        """
        if split != "test":
            raise ValueError(f"Unsupported split {split!r}; only 'test' exists.")
        with open(filepath, "r", encoding="utf-8") as f:
            data = json.load(f)
        numpy_dtypes = {"float32": numpy.float32, "bool": numpy.bool_}
        # Decode every JSON list into a torch tensor of the declared dtype.
        tensors = {
            key: torch.from_numpy(numpy.array(data[key]).astype(numpy_dtypes[dtype]))
            for key, (dtype, _) in self._FIELD_SPEC.items()
        }
        # Presumably the leading axis is the example/batch axis — the original
        # code indexed every field with the same i along axis 0.
        batch_size = tensors["x"].shape[0]
        for i in range(batch_size):
            # Slice with [i : i + 1] (not [i]) to keep a leading singleton
            # dimension, matching the nesting depth declared in _FIELD_SPEC.
            yield i, {key: tensor[i : i + 1] for key, tensor in tensors.items()}