# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quickdraw dataset"""

import io
import json
import os
import struct
import textwrap
from datetime import datetime

import numpy as np

import datasets
from datasets.tasks import ImageClassification


_CITATION = """\
@article{DBLP:journals/corr/HaE17,
  author    = {David Ha and
               Douglas Eck},
  title     = {A Neural Representation of Sketch Drawings},
  journal   = {CoRR},
  volume    = {abs/1704.03477},
  year      = {2017},
  url       = {http://arxiv.org/abs/1704.03477},
  archivePrefix = {arXiv},
  eprint    = {1704.03477},
  timestamp = {Mon, 13 Aug 2018 16:48:30 +0200},
  biburl    = {https://dblp.org/rec/bib/journals/corr/HaE17},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""

_DESCRIPTION = """\
The Quick Draw Dataset is a collection of 50 million drawings across 345 categories, contributed by players of the game Quick, Draw!.
The drawings were captured as timestamped vectors, tagged with metadata including what the player was asked to draw and in which country the player was located.
"""

_HOMEPAGE = "https://quickdraw.withgoogle.com/data"

_LICENSE = "CC BY 4.0"

_NAMES = """\
aircraft carrier,airplane,alarm clock,ambulance,angel
animal migration,ant,anvil,apple,arm
asparagus,axe,backpack,banana,bandage
barn,baseball bat,baseball,basket,basketball
bat,bathtub,beach,bear,beard
bed,bee,belt,bench,bicycle
binoculars,bird,birthday cake,blackberry,blueberry
book,boomerang,bottlecap,bowtie,bracelet
brain,bread,bridge,broccoli,broom
bucket,bulldozer,bus,bush,butterfly
cactus,cake,calculator,calendar,camel
camera,camouflage,campfire,candle,cannon
canoe,car,carrot,castle,cat
ceiling fan,cell phone,cello,chair,chandelier
church,circle,clarinet,clock,cloud
coffee cup,compass,computer,cookie,cooler
couch,cow,crab,crayon,crocodile
crown,cruise ship,cup,diamond,dishwasher
diving board,dog,dolphin,donut,door
dragon,dresser,drill,drums,duck
dumbbell,ear,elbow,elephant,envelope
eraser,eye,eyeglasses,face,fan
feather,fence,finger,fire hydrant,fireplace
firetruck,fish,flamingo,flashlight,flip flops
floor lamp,flower,flying saucer,foot,fork
frog,frying pan,garden hose,garden,giraffe
goatee,golf club,grapes,grass,guitar
hamburger,hammer,hand,harp,hat
headphones,hedgehog,helicopter,helmet,hexagon
hockey puck,hockey stick,horse,hospital,hot air balloon
hot dog,hot tub,hourglass,house plant,house
hurricane,ice cream,jacket,jail,kangaroo
key,keyboard,knee,knife,ladder
lantern,laptop,leaf,leg,light bulb
lighter,lighthouse,lightning,line,lion
lipstick,lobster,lollipop,mailbox,map
marker,matches,megaphone,mermaid,microphone
microwave,monkey,moon,mosquito,motorbike
mountain,mouse,moustache,mouth,mug
mushroom,nail,necklace,nose,ocean
octagon,octopus,onion,oven,owl
paint can,paintbrush,palm tree,panda,pants
paper clip,parachute,parrot,passport,peanut
pear,peas,pencil,penguin,piano
pickup truck,picture frame,pig,pillow,pineapple
pizza,pliers,police car,pond,pool
popsicle,postcard,potato,power outlet,purse
rabbit,raccoon,radio,rain,rainbow
rake,remote control,rhinoceros,rifle,river
roller coaster,rollerskates,sailboat,sandwich,saw
saxophone,school bus,scissors,scorpion,screwdriver
sea turtle,see saw,shark,sheep,shoe
shorts,shovel,sink,skateboard,skull
skyscraper,sleeping bag,smiley face,snail,snake
snorkel,snowflake,snowman,soccer ball,sock
speedboat,spider,spoon,spreadsheet,square
squiggle,squirrel,stairs,star,steak
stereo,stethoscope,stitches,stop sign,stove
strawberry,streetlight,string bean,submarine,suitcase
sun,swan,sweater,swing set,sword
syringe,t-shirt,table,teapot,teddy-bear
telephone,television,tennis racquet,tent,The Eiffel Tower
The Great Wall of China,The Mona Lisa,tiger,toaster,toe
toilet,tooth,toothbrush,toothpaste,tornado
tractor,traffic light,train,tree,triangle
trombone,truck,trumpet,umbrella,underwear
van,vase,violin,washing machine,watermelon
waterslide,whale,wheel,windmill,wine bottle
wine glass,wristwatch,yoga,zebra,zigzag
"""
_NAMES = [name for line in _NAMES.strip().splitlines() for name in line.strip().split(",")]

_CONFIG_NAME_TO_BASE_URL = {
    "raw": "https://storage.googleapis.com/quickdraw_dataset/full/raw/{}.ndjson",
    "preprocessed_simplified_drawings": "https://storage.googleapis.com/quickdraw_dataset/full/binary/{}.bin",
    "preprocessed_bitmaps": "https://storage.googleapis.com/quickdraw_dataset/full/numpy_bitmap/{}.npy",
    "sketch_rnn": "https://storage.googleapis.com/quickdraw_dataset/sketchrnn/{}.npz",
    "sketch_rnn_full": "https://storage.googleapis.com/quickdraw_dataset/sketchrnn/{}.full.npz",
}
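
# The templates above expand to one download URL per category name. A small
# illustration (using "cat", one of the categories in `_NAMES`):
#
#     _CONFIG_NAME_TO_BASE_URL["preprocessed_bitmaps"].format("cat")
#     # -> "https://storage.googleapis.com/quickdraw_dataset/full/numpy_bitmap/cat.npy"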


class Quickdraw(datasets.GeneratorBasedBuilder):
    """Quickdraw dataset"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="raw", version=VERSION, description="The raw moderated dataset"),
        datasets.BuilderConfig(
            name="preprocessed_simplified_drawings",
            version=VERSION,
            description=textwrap.dedent(
                """\
            The simplified version of the dataset, with simplified vectors, timing information removed, and the data positioned and scaled into a 256x256 region.
            The simplification process was:
                1. Align the drawing to the top-left corner, to have minimum values of 0.
                2. Uniformly scale the drawing, to have a maximum value of 255.
                3. Resample all strokes with a 1 pixel spacing.
                4. Simplify all strokes using the Ramer-Douglas-Peucker algorithm with an epsilon value of 2.0.
                """
            ),
        ),
        datasets.BuilderConfig(
            name="preprocessed_bitmaps",
            version=VERSION,
            description="The preprocessed dataset where all the simplified drawings have been rendered into a 28x28 grayscale bitmap.",
        ),
        datasets.BuilderConfig(
            name="sketch_rnn",
            version=VERSION,
            description=textwrap.dedent(
                """\
                This dataset was used for training the Sketch-RNN model from the paper https://arxiv.org/abs/1704.03477.
                In this dataset, 75K samples (70K Training, 2.5K Validation, 2.5K Test) have been randomly selected from each category
                and processed with RDP line simplification with an epsilon parameter of 2.0.
                """
            ),
        ),
        datasets.BuilderConfig(
            name="sketch_rnn_full",
            version=VERSION,
            description="Compared to the `sketch_rnn` config, this version provides the full data for each category for training more complex models.",
        ),
    ]
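
    # A minimal sketch (not part of the loading logic) of the first two simplification
    # steps described in the `preprocessed_simplified_drawings` config above: align the
    # drawing to the origin, then uniformly scale it into the 0-255 range. The 1-pixel
    # resampling and Ramer-Douglas-Peucker steps are omitted; `strokes` is a hypothetical
    # list of {"x": [...], "y": [...]} dicts and `np` is the module-level numpy import.
    #
    #     def _align_and_scale(strokes):
    #         xs = np.concatenate([np.asarray(s["x"], dtype=np.float32) for s in strokes])
    #         ys = np.concatenate([np.asarray(s["y"], dtype=np.float32) for s in strokes])
    #         min_x, min_y = xs.min(), ys.min()
    #         extent = max((xs - min_x).max(), (ys - min_y).max()) or 1.0
    #         return [
    #             {
    #                 "x": np.round((np.asarray(s["x"]) - min_x) / extent * 255).astype(np.uint8),
    #                 "y": np.round((np.asarray(s["y"]) - min_y) / extent * 255).astype(np.uint8),
    #             }
    #             for s in strokes
    #         ]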

    DEFAULT_CONFIG_NAME = "preprocessed_bitmaps"

    def _info(self):
        if self.config.name == "raw":
            features = datasets.Features(
                {
                    "key_id": datasets.Value("string"),
                    "word": datasets.ClassLabel(names=_NAMES),
                    "recognized": datasets.Value("bool"),
                    "timestamp": datasets.Value("timestamp[us, tz=UTC]"),
                    "countrycode": datasets.Value("string"),
                    "drawing": datasets.Sequence(
                        {
                            "x": datasets.Sequence(datasets.Value("float32")),
                            "y": datasets.Sequence(datasets.Value("float32")),
                            "t": datasets.Sequence(datasets.Value("int32")),
                        }
                    ),
                }
            )
        elif self.config.name == "preprocessed_simplified_drawings":
            features = datasets.Features(
                {
                    "key_id": datasets.Value("string"),
                    "word": datasets.ClassLabel(names=_NAMES),
                    "recognized": datasets.Value("bool"),
                    "timestamp": datasets.Value("timestamp[us, tz=UTC]"),
                    "countrycode": datasets.Value("string"),
                    "drawing": datasets.Sequence(
                        {
                            "x": datasets.Sequence(datasets.Value("uint8")),
                            "y": datasets.Sequence(datasets.Value("uint8")),
                        }
                    ),
                }
            )
        elif self.config.name == "preprocessed_bitmaps":
            features = datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.ClassLabel(names=_NAMES),
                }
            )
        else:  # sketch_rnn, sketch_rnn_full
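            # Per the Quick, Draw! data documentation, each drawing here is in the
            # stroke-3 format used by Sketch-RNN: rows of (delta_x, delta_y, pen_lifted).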
            features = datasets.Features(
                {
                    "word": datasets.ClassLabel(names=_NAMES),
                    "drawing": datasets.Array2D(shape=(None, 3), dtype="int16"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[ImageClassification(image_column="image", label_column="label")]
            if self.config.name == "preprocessed_bitmaps"
            else None,
        )

    def _split_generators(self, dl_manager):
        base_url = _CONFIG_NAME_TO_BASE_URL[self.config.name]
        if not self.config.name.startswith("sketch_rnn"):
            files = dl_manager.download({name: base_url.format(name) for name in _NAMES})
            files = list(files.items())
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "files": files,
                        "split": "train",
                    },
                ),
            ]
        else:
            files = dl_manager.download_and_extract({name: base_url.format(name) for name in _NAMES})
            files = list(files.items())
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "files": files,
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "files": files,
                        "split": "valid",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "files": files,
                        "split": "test",
                    },
                ),
            ]

    def _generate_examples(self, files, split):
        if self.config.name == "raw":
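            # Each line of the raw ndjson file is one JSON record, roughly of the form
            # (illustrative, abbreviated values):
            #     {"key_id": "...", "word": "cat", "countrycode": "US",
            #      "timestamp": "2017-03-05 12:34:56.78901 UTC", "recognized": true,
            #      "drawing": [[[x0, x1, ...], [y0, y1, ...], [t0, t1, ...]], ...]}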
            idx = 0
            for _, file in files:
                with open(file, encoding="utf-8") as f:
                    for line in f:
                        example = json.loads(line)
                        example["timestamp"] = datetime.strptime(example["timestamp"], "%Y-%m-%d %H:%M:%S.%f %Z")
                        example["drawing"] = [{"x": x, "y": y, "t": t} for x, y, t in example["drawing"]]
                        yield idx, example
                        idx += 1
        elif self.config.name == "preprocessed_simplified_drawings":
            idx = 0
            for label, file in files:
                with open(file, "rb") as f:
                    while True:
                        try:
                            example = process_struct(f)
                            example["word"] = label
                            yield idx, example
                        except struct.error:
                            break
                        idx += 1
        elif self.config.name == "preprocessed_bitmaps":
            idx = 0
            for label, file in files:
                with open(file, "rb") as f:
                    images = np.load(f)
                    for image in images:
                        yield idx, {
                            "image": image.reshape(28, 28),
                            "label": label,
                        }
                        idx += 1
        else:  # sketch_rnn, sketch_rnn_full
            idx = 0
            for label, file in files:
                with open(os.path.join(file, f"{split}.npy"), "rb") as f:
                    # read the entire file since f.seek is not supported in streaming mode
                    drawings = np.load(io.BytesIO(f.read()), encoding="latin1", allow_pickle=True)
                    for drawing in drawings:
                        yield idx, {
                            "word": label,
                            "drawing": drawing,
                        }
                        idx += 1


def process_struct(fileobj):
    """
    Process a struct from a binary file object.

    The code for this function is borrowed from the following link:
    https://github.com/googlecreativelab/quickdraw-dataset/blob/f0f3beef0fc86393b3771cdf1fc94828b76bc89b/examples/binary_file_parser.py#L19
    """
    (key_id,) = struct.unpack("Q", fileobj.read(8))  # unsigned 64-bit unique drawing id
    (country_code,) = struct.unpack("2s", fileobj.read(2))  # two-letter country code
    (recognized,) = struct.unpack("b", fileobj.read(1))  # whether the game recognized the drawing
    (timestamp,) = struct.unpack("I", fileobj.read(4))  # unix timestamp in seconds
    (n_strokes,) = struct.unpack("H", fileobj.read(2))  # number of strokes in the drawing
    drawing = []
    for _ in range(n_strokes):
        (n_points,) = struct.unpack("H", fileobj.read(2))  # number of points in this stroke
        fmt = str(n_points) + "B"
        x = struct.unpack(fmt, fileobj.read(n_points))  # x coordinates, one unsigned byte each (0-255)
        y = struct.unpack(fmt, fileobj.read(n_points))  # y coordinates, one unsigned byte each (0-255)
        drawing.append({"x": list(x), "y": list(y)})

    return {
        "key_id": str(key_id),
        "recognized": recognized,
        "timestamp": datetime.fromtimestamp(timestamp),
        "countrycode": country_code.decode("utf-8"),
        "drawing": drawing,
    }
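
# Minimal usage sketch (an assumption about how this script is published, not part of
# the script itself): with the loader available on the Hugging Face Hub under the
# "quickdraw" dataset name, the default bitmap config could be loaded like so:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("quickdraw", "preprocessed_bitmaps", split="train", streaming=True)
#     example = next(iter(ds))  # {"image": <28x28 PIL image>, "label": <class index>}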