# coco_keypoints/coco_keypoints.py
import json
import datasets
from pathlib import Path
_HOMEPAGE = 'https://cocodataset.org/'
_LICENSE = 'Creative Commons Attribution 4.0 License'
_DESCRIPTION = 'COCO is a large-scale object detection, segmentation, and captioning dataset.'
_CITATION = '''\
@article{cocodataset,
author = {Tsung{-}Yi Lin and Michael Maire and Serge J. Belongie and Lubomir D. Bourdev and Ross B. Girshick and James Hays and Pietro Perona and Deva Ramanan and Piotr Doll{\'{a}}r and C. Lawrence Zitnick},
title = {Microsoft {COCO:} Common Objects in Context},
journal = {CoRR},
volume = {abs/1405.0312},
year = {2014},
url = {http://arxiv.org/abs/1405.0312},
archivePrefix = {arXiv},
eprint = {1405.0312},
timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/LinMBHPRDZ14},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
'''
class COCOKeypointsConfig(datasets.BuilderConfig):
    '''BuilderConfig for the COCO 2017 keypoints dataset.'''
def __init__(
self, description, homepage,
annotation_urls, **kwargs
):
super(COCOKeypointsConfig, self).__init__(
version=datasets.Version('1.0.0', ''),
**kwargs
)
self.description = description
self.homepage = homepage
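        # Image archives are pulled from the official COCO download server;
        # the keypoint annotation archives are supplied via the config.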
url = 'http://images.cocodataset.org/zips/'
self.train_image_url = url + 'train2017.zip'
self.val_image_url = url + 'val2017.zip'
self.train_annotation_urls = annotation_urls['train']
self.val_annotation_urls = annotation_urls['validation']
class COCOKeypoints(datasets.GeneratorBasedBuilder):
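    '''COCO 2017 keypoint detection: images with per-person bounding boxes and 17 body keypoints.'''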
BUILDER_CONFIGS = [
COCOKeypointsConfig(
description=_DESCRIPTION,
homepage=_HOMEPAGE,
annotation_urls={
'train': 'data/keypoints_train.zip',
'validation': 'data/keypoints_validation.zip'
},
)
]
def _info(self):
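        # Each example holds an RGB image, a variable-length list of
        # per-person bounding boxes (4 floats each; the COCO
        # [x, y, width, height] convention is assumed) and, per person,
        # a list of 17 keypoints.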
features = datasets.Features({
'image': datasets.Image(mode='RGB', decode=True, id=None),
'bboxes': datasets.Sequence(
feature=datasets.Sequence(
feature=datasets.Value(dtype='float32', id=None),
length=4, id=None
), length=-1, id=None
),
'keypoints': datasets.Sequence(
feature=datasets.Sequence(
feature=datasets.Sequence(
feature=datasets.Value(dtype='int32', id=None),
), length=17, id=None
), length=-1, id=None
)
})
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION
)
def _split_generators(self, dl_manager):
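        # Download and extract the COCO 2017 image zips and the JSONL
        # annotation archives, then point each split at its files.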
train_image_path = dl_manager.download_and_extract(
self.config.train_image_url
)
validation_image_path = dl_manager.download_and_extract(
self.config.val_image_url
)
train_annotation_paths = dl_manager.download_and_extract(
self.config.train_annotation_urls
)
val_annotation_paths = dl_manager.download_and_extract(
self.config.val_annotation_urls
)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
'image_path': f'{train_image_path}/train2017',
'annotation_path': f'{train_annotation_paths}/keypoints_train.jsonl'
}
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
'image_path': f'{validation_image_path}/val2017',
'annotation_path': f'{val_annotation_paths}/keypoints_validation.jsonl'
}
)
]
def _generate_examples(self, image_path, annotation_path):
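        # Stream the JSONL annotation file: each line describes one image
        # together with its per-person bounding boxes and keypoints.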
idx = 0
image_path = Path(image_path)
with open(annotation_path, 'r', encoding='utf-8') as f:
for line in f:
obj = json.loads(line.strip())
example = {
'image': str(image_path / obj['image']),
'bboxes': obj['bboxes'],
'keypoints': obj['keypoints']
}
yield idx, example
idx += 1
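# Minimal usage sketch (assumes this script is hosted on the Hugging Face Hub
# as 'whyen-wang/coco_keypoints'; adjust the repo id as needed, and pass
# trust_remote_code=True on datasets versions that require it for
# script-based datasets):
#
#   from datasets import load_dataset
#
#   ds = load_dataset('whyen-wang/coco_keypoints')
#   example = ds['train'][0]
#   print(example['image'].size, len(example['bboxes']), len(example['keypoints']))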