Add generation scripts and update README.md

- .gitignore +32 -0
- README.md +77 -0
- classes.py +113 -0
- generate.py +42 -0
- imagenet-100.py +90 -0
.gitignore
ADDED
@@ -0,0 +1,32 @@
+.DS_Store
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# Vim
+*.swp
+
+# Distribution / packaging
+*.egg-info/
+dist
+build
+_version.py
+
+# VScode IDE
+.vscode/
+.env
+
+# Virtual environment
+.venv/
+
+# Local data and scratch
+.scratch
+cache
+
+# Jupyter
+.ipynb_checkpoints
+
+# SLURM
+jobs
+slurm-*
README.md
CHANGED
@@ -125,3 +125,80 @@ configs:
 - split: validation
   path: data/validation-*
 ---
+
+# Dataset Card for ImageNet-100
+
+ImageNet-100 is a subset of ImageNet with 100 classes randomly selected from the original ImageNet-1k dataset. In addition, the images have been resized to 160 pixels on the shorter side.
+
+- **Homepage:** https://github.com/HobbitLong/CMC
+- **Paper:** https://arxiv.org/abs/1906.05849
+
+## Dataset Structure
+
+### Data Instances
+
+An example looks as follows:
+
+```
+{
+  'image': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=160x213>,
+  'label': 0
+}
+```
+
+### Data Fields
+
+The data instances have the following fields:
+
+- `image`: a `PIL.Image.Image` object containing the image.
+- `label`: an `int` classification label.
+
+The labels are indexed based on the sorted list of synset ids in [imagenet100.txt](https://raw.githubusercontent.com/HobbitLong/CMC/master/imagenet100.txt), which we automatically map to the original class names.
+
+### Data Splits
+
+|             |train  |validation|
+|-------------|------:|---------:|
+|# of examples|126689 |5000      |
+
+## Additional Information
+
+### Licensing Information
+
+In exchange for permission to use the ImageNet database (the "Database") at Princeton University and Stanford University, Researcher hereby agrees to the following terms and conditions:
+
+1. Researcher shall use the Database only for non-commercial research and educational purposes.
+1. Princeton University and Stanford University make no representations or warranties regarding the Database, including but not limited to warranties of non-infringement or fitness for a particular purpose.
+1. Researcher accepts full responsibility for his or her use of the Database and shall defend and indemnify the ImageNet team, Princeton University, and Stanford University, including their employees, Trustees, officers and agents, against any and all claims arising from Researcher's use of the Database, including but not limited to Researcher's use of any copies of copyrighted images that he or she may create from the Database.
+1. Researcher may provide research associates and colleagues with access to the Database provided that they first agree to be bound by these terms and conditions.
+1. Princeton University and Stanford University reserve the right to terminate Researcher's access to the Database at any time.
+1. If Researcher is employed by a for-profit, commercial entity, Researcher's employer shall also be bound by these terms and conditions, and Researcher hereby represents that he or she is fully authorized to enter into this agreement on behalf of such employer.
+1. The law of the State of New Jersey shall apply to all disputes under this agreement.
+
+### Citation Information
+
+```bibtex
+@article{imagenet15russakovsky,
+    Author = {Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei},
+    Title = { {ImageNet Large Scale Visual Recognition Challenge} },
+    Year = {2015},
+    journal = {International Journal of Computer Vision (IJCV)},
+    doi = {10.1007/s11263-015-0816-y},
+    volume={115},
+    number={3},
+    pages={211-252}
+}
+
+@inproceedings{tian2020contrastive,
+    title={Contrastive multiview coding},
+    author={Tian, Yonglong and Krishnan, Dilip and Isola, Phillip},
+    booktitle={Computer Vision--ECCV 2020: 16th European Conference, Glasgow, UK, August 23--28, 2020, Proceedings, Part XI 16},
+    pages={776--794},
+    year={2020},
+    organization={Springer}
+}
+```
+
+### Contributions
+
+Thanks to the 🤗 authors of the [imagenet-1k](https://huggingface.co/datasets/imagenet-1k) dataset, which was used as a reference.
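The `label` field above is an integer index into the `ClassLabel` feature defined by the builder, so the original class names can be recovered at load time. Below is a minimal sketch of that mapping, assuming the resized dataset has already been produced by `generate.py` with the default arguments (the `cache/imagenet-100_160` path is illustrative, not fixed by this commit):

```python
from datasets import load_from_disk

# Hypothetical output path from generate.py (default --outdir "cache", --size 160).
ds = load_from_disk("cache/imagenet-100_160")

example = ds["validation"][0]
label_feature = ds["validation"].features["label"]

# ClassLabel stores the human-readable names, so the integer label
# maps back to the original class name.
print(example["label"], "->", label_feature.int2str(example["label"]))
```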
classes.py
ADDED
@@ -0,0 +1,113 @@
+"""
+Class list for ImageNet-100.
+
+References:
+    https://github.com/HobbitLong/CMC/blob/master/imagenet100.txt
+"""
+
+from collections import OrderedDict
+
+IMAGENET100_CLASSES = OrderedDict(
+    {
+        "n02869837": "bonnet, poke bonnet",
+        "n01749939": "green mamba",
+        "n02488291": "langur",
+        "n02107142": "Doberman, Doberman pinscher",
+        "n13037406": "gyromitra",
+        "n02091831": "Saluki, gazelle hound",
+        "n04517823": "vacuum, vacuum cleaner",
+        "n04589890": "window screen",
+        "n03062245": "cocktail shaker",
+        "n01773797": "garden spider, Aranea diademata",
+        "n01735189": "garter snake, grass snake",
+        "n07831146": "carbonara",
+        "n07753275": "pineapple, ananas",
+        "n03085013": "computer keyboard, keypad",
+        "n04485082": "tripod",
+        "n02105505": "komondor",
+        "n01983481": "American lobster, Northern lobster, Maine lobster, Homarus americanus",
+        "n02788148": "bannister, banister, balustrade, balusters, handrail",
+        "n03530642": "honeycomb",
+        "n04435653": "tile roof",
+        "n02086910": "papillon",
+        "n02859443": "boathouse",
+        "n13040303": "stinkhorn, carrion fungus",
+        "n03594734": "jean, blue jean, denim",
+        "n02085620": "Chihuahua",
+        "n02099849": "Chesapeake Bay retriever",
+        "n01558993": "robin, American robin, Turdus migratorius",
+        "n04493381": "tub, vat",
+        "n02109047": "Great Dane",
+        "n04111531": "rotisserie",
+        "n02877765": "bottlecap",
+        "n04429376": "throne",
+        "n02009229": "little blue heron, Egretta caerulea",
+        "n01978455": "rock crab, Cancer irroratus",
+        "n02106550": "Rottweiler",
+        "n01820546": "lorikeet",
+        "n01692333": "Gila monster, Heloderma suspectum",
+        "n07714571": "head cabbage",
+        "n02974003": "car wheel",
+        "n02114855": "coyote, prairie wolf, brush wolf, Canis latrans",
+        "n03785016": "moped",
+        "n03764736": "milk can",
+        "n03775546": "mixing bowl",
+        "n02087046": "toy terrier",
+        "n07836838": "chocolate sauce, chocolate syrup",
+        "n04099969": "rocking chair, rocker",
+        "n04592741": "wing",
+        "n03891251": "park bench",
+        "n02701002": "ambulance",
+        "n03379051": "football helmet",
+        "n02259212": "leafhopper",
+        "n07715103": "cauliflower",
+        "n03947888": "pirate, pirate ship",
+        "n04026417": "purse",
+        "n02326432": "hare",
+        "n03637318": "lampshade, lamp shade",
+        "n01980166": "fiddler crab",
+        "n02113799": "standard poodle",
+        "n02086240": "Shih-Tzu",
+        "n03903868": "pedestal, plinth, footstall",
+        "n02483362": "gibbon, Hylobates lar",
+        "n04127249": "safety pin",
+        "n02089973": "English foxhound",
+        "n03017168": "chime, bell, gong",
+        "n02093428": "American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier",
+        "n02804414": "bassinet",
+        "n02396427": "wild boar, boar, Sus scrofa",
+        "n04418357": "theater curtain, theatre curtain",
+        "n02172182": "dung beetle",
+        "n01729322": "hognose snake, puff adder, sand viper",
+        "n02113978": "Mexican hairless",
+        "n03787032": "mortarboard",
+        "n02089867": "Walker hound, Walker foxhound",
+        "n02119022": "red fox, Vulpes vulpes",
+        "n03777754": "modem",
+        "n04238763": "slide rule, slipstick",
+        "n02231487": "walking stick, walkingstick, stick insect",
+        "n03032252": "cinema, movie theater, movie theatre, movie house, picture palace",
+        "n02138441": "meerkat, mierkat",
+        "n02104029": "kuvasz",
+        "n03837869": "obelisk",
+        "n03494278": "harmonica, mouth organ, harp, mouth harp",
+        "n04136333": "sarong",
+        "n03794056": "mousetrap",
+        "n03492542": "hard disc, hard disk, fixed disk",
+        "n02018207": "American coot, marsh hen, mud hen, water hen, Fulica americana",
+        "n04067472": "reel",
+        "n03930630": "pickup, pickup truck",
+        "n03584829": "iron, smoothing iron",
+        "n02123045": "tabby, tabby cat",
+        "n04229816": "ski mask",
+        "n02100583": "vizsla, Hungarian pointer",
+        "n03642806": "laptop, laptop computer",
+        "n04336792": "stretcher",
+        "n03259280": "Dutch oven",
+        "n02116738": "African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus",
+        "n02108089": "boxer",
+        "n03424325": "gasmask, respirator, gas helmet",
+        "n01855672": "goose",
+        "n02090622": "borzoi, Russian wolfhound",
+    }
+)
generate.py
ADDED
@@ -0,0 +1,42 @@
+"""
+Generate resized ImageNet-100 dataset.
+"""
+
+from argparse import ArgumentParser
+from functools import partial
+from pathlib import Path
+
+from datasets import load_dataset
+from torchvision.transforms import InterpolationMode
+from torchvision.transforms.functional import resize
+
+SCRIPT = str(Path(__file__).parent / "imagenet-100.py")
+
+
+def transforms(examples, size: int = 160):
+    examples["image"] = [
+        resize(image, size, interpolation=InterpolationMode.BICUBIC)
+        for image in examples["image"]
+    ]
+    return examples
+
+
+if __name__ == "__main__":
+    parser = ArgumentParser()
+    parser.add_argument("--outdir", "-o", type=str, default="cache")
+    parser.add_argument("--size", "-s", type=int, default=160)
+    parser.add_argument("--num-proc", "-n", type=int, default=8)
+    args = parser.parse_args()
+
+    dataset = load_dataset(SCRIPT)
+    dataset = dataset.map(
+        partial(transforms, size=args.size),
+        batched=True,
+        batch_size=256,
+        num_proc=args.num_proc,
+    )
+    print(dataset)
+    print(dataset["validation"][0])
+
+    outdir = Path(args.outdir) / f"imagenet-100_{args.size}"
+    dataset.save_to_disk(outdir, num_proc=args.num_proc)
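In `transforms`, passing a single `int` to torchvision's functional `resize` scales the shorter side to that size while preserving the aspect ratio, which is how the card's "resized to 160 pixels on the shorter side" statement is realized. A small self-contained check with a synthetic PIL image (no ImageNet data required):

```python
from PIL import Image
from torchvision.transforms import InterpolationMode
from torchvision.transforms.functional import resize

# A synthetic 400x300 RGB image stands in for an ImageNet JPEG.
img = Image.new("RGB", (400, 300))

# With a scalar size, the shorter side (300) is scaled to 160 and the
# aspect ratio is kept, so the result is 213x160 (width x height).
out = resize(img, 160, interpolation=InterpolationMode.BICUBIC)
print(out.size)  # (213, 160)
```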
imagenet-100.py
ADDED
@@ -0,0 +1,90 @@
+"""
+Dataset builder for ImageNet-100.
+
+References:
+    https://huggingface.co/datasets/imagenet-1k/blob/main/imagenet-1k.py
+"""
+
+import os
+from pathlib import Path
+from typing import List
+
+import datasets
+from datasets.tasks import ImageClassification
+
+from .classes import IMAGENET100_CLASSES
+
+_CITATION = """\
+@inproceedings{tian2020contrastive,
+  title={Contrastive multiview coding},
+  author={Tian, Yonglong and Krishnan, Dilip and Isola, Phillip},
+  booktitle={Computer Vision--ECCV 2020: 16th European Conference, Glasgow, UK, August 23--28, 2020, Proceedings, Part XI 16},
+  pages={776--794},
+  year={2020},
+  organization={Springer}
+}
+"""
+
+_HOMEPAGE = "https://github.com/HobbitLong/CMC"
+
+_DESCRIPTION = """\
+ImageNet-100 is a subset of ImageNet with 100 classes randomly selected from the original ImageNet-1k dataset.
+"""
+
+_IMAGENET_ROOT = os.environ.get("IMAGENET_ROOT", "/data/imagenet")
+
+_DATA_URL = {
+    "train": [f"{_IMAGENET_ROOT}/train/{label}" for label in IMAGENET100_CLASSES],
+    "val": [f"{_IMAGENET_ROOT}/val/{label}" for label in IMAGENET100_CLASSES],
+}
+
+
+class Imagenet100(datasets.GeneratorBasedBuilder):
+    VERSION = datasets.Version("1.0.0")
+
+    DEFAULT_WRITER_BATCH_SIZE = 1000
+
+    def _info(self):
+        assert len(IMAGENET100_CLASSES) == 100
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "image": datasets.Image(),
+                    "label": datasets.ClassLabel(
+                        names=list(IMAGENET100_CLASSES.values())
+                    ),
+                }
+            ),
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+            task_templates=[
+                ImageClassification(image_column="image", label_column="label")
+            ],
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"folders": _DATA_URL["train"]},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"folders": _DATA_URL["val"]},
+            ),
+        ]
+
+    def _generate_examples(self, folders: List[str]):
+        """Yields examples."""
+        idx = 0
+        for folder in folders:
+            synset_id = Path(folder).name
+            label = IMAGENET100_CLASSES[synset_id]
+
+            for path in Path(folder).glob("*.JPEG"):
+                ex = {"image": str(path), "label": label}
+                yield idx, ex
+                idx += 1