import json
import os

import datasets

_DESCRIPTION = """
MAPLM: A Real-World Large-Scale Vision-Language Benchmark for Map and Traffic Scene Understanding
"""

_HOMEPAGE = "https://github.com/llvm-ad/maplm"

_LICENSE = "https://github.com/LLVM-AD/MAPLM/blob/main/LICENSE"

_CITATION = """\
@inproceedings{cao_maplm_2024,
	title = {{MAPLM}: {A} {Real}-{World} {Large}-{Scale} {Vision}-{Language} {Dataset} for {Map} and {Traffic} {Scene} {Understanding}},
	booktitle = {{CVPR}},
	author = {Cao, Xu and Zhou, Tong and Ma, Yunsheng and Ye, Wenqian and Cui, Can and Tang, Kun and Cao, Zhipeng and Liang, Kaizhao and Wang, Ziran and Rehg, James M. and Zheng, Chao},
	year = {2024},
}
"""


class MapLMBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for MapLM dataset."""

    def __init__(self, name, splits):
        super().__init__(name=name)
        self.splits = splits


class MapLMDataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = MapLMBuilderConfig
    BUILDER_CONFIGS = [
        MapLMBuilderConfig(
            name="v2.0",
            splits=["train", "val", "test"],
        )
    ]
    DEFAULT_CONFIG_NAME = "v2.0"

    def _info(self):
        # DatasetInfo describes the dataset: its features, description, license, and citation.
        feature_dict = {
            "frame_id": datasets.Value("string"),
            "images": datasets.Sequence(datasets.Value("string")),
            "question": datasets.Sequence(datasets.Value("string")),
            "options": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
            "answer": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
            "tag": datasets.Sequence(datasets.Value("string")),
        }

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=datasets.Features(feature_dict),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
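
    # Illustrative shape of one yielded example. The field values below are
    # invented for illustration only; the structure mirrors `feature_dict`
    # above and the records built in `_generate_examples`:
    #
    #   {
    #       "frame_id": "FR0001",
    #       "images": ["path/to/front.jpg", "path/to/left.jpg"],
    #       "question": ["How many lanes are on the current road?", ...],
    #       "options": [["1", "2", "3", "4"], ...],
    #       "answer": [["2"], ...],
    #       "tag": ["lane_count", ...],
    #   }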

    def _split_generators(self, dl_manager):
        splits = []
        data_root = dl_manager.download("data/")
        for split in self.config.splits:
            annotation_file = os.path.join(data_root, f"{split}_v2.json")
            with open(annotation_file, "r", encoding="utf-8") as f:
                annotations = json.load(f)
            if split == "test":
                generator = datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"annotations": annotations},
                )
            elif split == "train":
                generator = datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"annotations": annotations},
                )
            elif split == "val":
                generator = datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"annotations": annotations},
                )
            else:
                continue
            splits.append(generator)
        return splits
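
    # Expected layout of one entry in a `{split}_v2.json` annotation file, as
    # inferred from the indexing in `_generate_examples` below (values in angle
    # brackets are placeholders, not real data):
    #
    #   "<frame key>": {
    #       "id": "<frame id>",
    #       "image_paths": {"<camera>": "<image path>", ...},
    #       "QA": {
    #           "perception": {
    #               "<qa key>": {
    #                   "question": "<text>",
    #                   "option": ["<choice>", ...],
    #                   "answer": "<choice>" or ["<choice>", ...],
    #                   "tag": "<category>",
    #               },
    #               ...
    #           },
    #           "behavior": {... same per-question fields ...},
    #       },
    #   }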

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, annotations):
        for i, anno_key in enumerate(annotations):
            frame = annotations[anno_key]
            data_item = {
                "frame_id": frame["id"],
                "images": list(frame["image_paths"].values()),
                "question": [],
                "options": [],
                "answer": [],
                "tag": [],
            }

            # Perception questions: answers may be a single string or a list,
            # so normalize them to a list to match the
            # Sequence(Sequence(string)) "answer" feature declared in `_info`.
            for perception_qa in frame["QA"]["perception"].values():
                data_item["question"].append(perception_qa["question"])
                data_item["options"].append(perception_qa["option"])
                anno_answer = perception_qa["answer"]
                if isinstance(anno_answer, list):
                    data_item["answer"].append(anno_answer)
                else:
                    data_item["answer"].append([anno_answer])
                data_item["tag"].append(perception_qa["tag"])

            # Behavior questions: answers are normalized the same way as
            # perception answers.
            for behavior_qa in frame["QA"]["behavior"].values():
                data_item["question"].append(behavior_qa["question"])
                data_item["options"].append(behavior_qa["option"])
                anno_answer = behavior_qa["answer"]
                if isinstance(anno_answer, list):
                    data_item["answer"].append(anno_answer)
                else:
                    data_item["answer"].append([anno_answer])
                data_item["tag"].append(behavior_qa["tag"])

            yield i, data_item
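

# Minimal usage sketch, not part of the builder API. It assumes a `datasets`
# version that still supports loading-script datasets and that the
# `data/{split}_v2.json` annotation files are reachable next to this script.
if __name__ == "__main__":
    # Build the validation split of the default "v2.0" configuration.
    maplm_val = datasets.load_dataset(__file__, name="v2.0", split="validation")
    print(maplm_val)
    print(maplm_val[0]["frame_id"], maplm_val[0]["question"])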