import json
import os

import datasets

_DESCRIPTION = """
MAPLM: A Real-World Large-Scale Vision-Language Benchmark for Map and Traffic Scene Understanding
"""

_HOMEPAGE = "https://github.com/llvm-ad/maplm"

_LICENSE = "https://github.com/LLVM-AD/MAPLM/blob/main/LICENSE"

_CITATION = """\
@inproceedings{cao_maplm_2024,
    title = {{MAPLM}: {A} {Real}-{World} {Large}-{Scale} {Vision}-{Language} {Dataset} for {Map} and {Traffic} {Scene} {Understanding}},
    booktitle = {{CVPR}},
    author = {Cao, Xu and Zhou, Tong and Ma, Yunsheng and Ye, Wenqian and Cui, Can and Tang, Kun and Cao, Zhipeng and Liang, Kaizhao and Wang, Ziran and Rehg, James M. and Zheng, Chao},
    year = {2024},
}
"""


class MapLMBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for MapLM dataset."""

    def __init__(self, name, splits, **kwargs):
        # Forward any extra keyword arguments (e.g. version, description)
        # to the base BuilderConfig and record the requested splits.
        super(MapLMBuilderConfig, self).__init__(name=name, **kwargs)
        self.splits = splits


class MapLMDataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = MapLMBuilderConfig
    BUILDER_CONFIGS = [
        MapLMBuilderConfig(
            name="v2.0",
            splits=["train", "val", "test"],
        )
    ]
    DEFAULT_CONFIG_NAME = "v2.0"

    def _info(self):
        feature_dict = {
            "frame_id": datasets.Value("string"),
            "images": datasets.Sequence(datasets.Value("string")),
            "question": datasets.Sequence(datasets.Value("string")),
            "options": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
            "answer": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
            "tag": datasets.Sequence(datasets.Value("string")),
        }

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(feature_dict),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
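
    # The feature schema above corresponds to one record per annotated frame.
    # A hypothetical example record (values are placeholders, not taken from
    # the dataset) would look roughly like:
    #
    #   {
    #       "frame_id": "<frame id>",
    #       "images": ["<path to camera image 0>", "<path to camera image 1>"],
    #       "question": ["<perception question>", "<behavior question>"],
    #       "options": [["A. ...", "B. ..."], ["A. ...", "B. ..."]],
    #       "answer": [["A. ..."], ["B. ..."]],
    #       "tag": ["<tag>", "<tag>"],
    #   }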

    def _split_generators(self, dl_manager):
        splits = []
        data_root = dl_manager.download("data/")
        for split in self.config.splits:
            annotation_file = os.path.join(data_root, f"{split}_v2.json")
            # Use a context manager so the annotation file is closed promptly.
            with open(annotation_file, encoding="utf-8") as f:
                annotations = json.load(f)
            if split == "test":
                generator = datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"annotations": annotations},
                )
            elif split == "train":
                generator = datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"annotations": annotations},
                )
            elif split == "val":
                generator = datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"annotations": annotations},
                )
            else:
                # Skip any split name this script does not know how to handle.
                continue
            splits.append(generator)
        return splits
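
    # `_generate_examples` below consumes the per-split annotation JSON. Based
    # on how the method indexes it (this layout is inferred from the code, not
    # from the dataset documentation), each file is expected to look roughly
    # like:
    #
    #   {
    #       "<frame key>": {
    #           "id": "<frame id>",
    #           "image_paths": {"<camera>": "<image path>", ...},
    #           "QA": {
    #               "perception": {
    #                   "<qa key>": {
    #                       "question": "...",
    #                       "option": ["...", ...],
    #                       "answer": "..." or ["...", ...],
    #                       "tag": "...",
    #                   },
    #                   ...
    #               },
    #               "behavior": {...},
    #           },
    #       },
    #       ...
    #   }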

    def _generate_examples(self, annotations):
        for i, anno_key in enumerate(annotations):
            frame_anno = annotations[anno_key]
            data_item = {
                "frame_id": frame_anno["id"],
                "images": list(frame_anno["image_paths"].values()),
                "question": [],
                "options": [],
                "answer": [],
                "tag": [],
            }

            # Perception QA pairs for this frame.
            for perception_key in frame_anno["QA"]["perception"]:
                perception_qa = frame_anno["QA"]["perception"][perception_key]
                data_item["question"].append(perception_qa["question"])
                data_item["options"].append(perception_qa["option"])
                anno_answer = perception_qa["answer"]
                # Answers may be stored as a single string or a list of
                # strings; normalize to a list to match the feature schema.
                if isinstance(anno_answer, list):
                    data_item["answer"].append(anno_answer)
                else:
                    data_item["answer"].append([anno_answer])
                data_item["tag"].append(perception_qa["tag"])

            # Behavior QA pairs, appended after the perception ones.
            for behavior_key in frame_anno["QA"]["behavior"]:
                behavior_qa = frame_anno["QA"]["behavior"][behavior_key]
                data_item["question"].append(behavior_qa["question"])
                data_item["options"].append(behavior_qa["option"])
                anno_answer = behavior_qa["answer"]
                # Normalize behavior answers the same way as perception ones.
                if isinstance(anno_answer, list):
                    data_item["answer"].append(anno_answer)
                else:
                    data_item["answer"].append([anno_answer])
                data_item["tag"].append(behavior_qa["tag"])

            yield i, data_item
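

if __name__ == "__main__":
    # Minimal local smoke test, included as a usage sketch rather than as part
    # of the builder API. It assumes this file is the dataset's loading script
    # and that the installed `datasets` version still supports loading scripts
    # (recent releases may require `trust_remote_code=True`, or may not
    # support script-based datasets at all).
    from datasets import load_dataset

    maplm = load_dataset(__file__, name="v2.0", trust_remote_code=True)
    print(maplm["train"][0])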