# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Caltech 101 loading script."""

from pathlib import Path

import numpy as np

import datasets
from datasets.tasks import ImageClassification


_CITATION = """\
@article{FeiFei2004LearningGV,
  title={Learning Generative Visual Models from Few Training Examples: An Incremental Bayesian Approach Tested on 101 Object Categories},
  author={Li Fei-Fei and Rob Fergus and Pietro Perona},
  journal={Computer Vision and Pattern Recognition Workshop},
  year={2004},
}
"""

_DESCRIPTION = """\
Pictures of objects belonging to 101 categories. About 40 to 800 images per
category. Most categories have about 50 images. Collected in September 2003 by
Fei-Fei Li, Marco Andreetto, and Marc'Aurelio Ranzato. The size of each image
is roughly 300 x 200 pixels.
"""

_HOMEPAGE = "https://data.caltech.edu/records/20086"

_LICENSE = "CC BY 4.0"

# A relative URL: the download manager resolves it against the dataset
# repository, so the archive is expected to live next to this script.
_DATA_URL = "caltech-101.zip"

_NAMES = [
    "accordion", "airplanes", "anchor", "ant", "background_google", "barrel",
    "bass", "beaver", "binocular", "bonsai", "brain", "brontosaurus",
    "buddha", "butterfly", "camera", "cannon", "car_side", "ceiling_fan",
    "cellphone", "chair", "chandelier", "cougar_body", "cougar_face", "crab",
    "crayfish", "crocodile", "crocodile_head", "cup", "dalmatian", "dollar_bill",
    "dolphin", "dragonfly", "electric_guitar", "elephant", "emu", "euphonium",
    "ewer", "faces", "faces_easy", "ferry", "flamingo", "flamingo_head",
    "garfield", "gerenuk", "gramophone", "grand_piano", "hawksbill", "headphone",
    "hedgehog", "helicopter", "ibis", "inline_skate", "joshua_tree", "kangaroo",
    "ketch", "lamp", "laptop", "leopards", "llama", "lobster",
    "lotus", "mandolin", "mayfly", "menorah", "metronome", "minaret",
    "motorbikes", "nautilus", "octopus", "okapi", "pagoda", "panda",
    "pigeon", "pizza", "platypus", "pyramid", "revolver", "rhino",
    "rooster", "saxophone", "schooner", "scissors", "scorpion", "sea_horse",
    "snoopy", "soccer_ball", "stapler", "starfish", "stegosaurus", "stop_sign",
    "strawberry", "sunflower", "tick", "trilobite", "umbrella", "watch",
    "water_lilly", "wheelchair", "wild_cat", "windsor_chair", "wrench", "yin_yang",
]

# Number of images sampled per class for the train split; the remaining
# images in each class form the test split.
_TRAIN_POINTS_PER_CLASS = 30


class Caltech101(datasets.GeneratorBasedBuilder):
    """Caltech 101 dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.features.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "label"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            # `task_templates` expects a list of templates.
            task_templates=[ImageClassification(image_column="image", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        data_root_dir = dl_manager.download_and_extract(_DATA_URL)
        compress_folder_path = [
            file
            for file in dl_manager.iter_files(data_root_dir)
            if Path(file).name == "101_ObjectCategories.tar.gz"
        ][0]
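        # The downloaded zip wraps a nested `101_ObjectCategories.tar.gz` that
        # holds the actual images, so a second extraction pass is needed.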
        data_dir = dl_manager.extract(compress_folder_path)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to `_generate_examples`.
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        # Same strategy as the one used in TensorFlow Datasets:
        # `_TRAIN_POINTS_PER_CLASS` images are sampled per class for the train
        # split, and the remaining images constitute the test split.
        is_train_split = split == "train"
        data_dir = Path(filepath) / "101_ObjectCategories"

        # Fix the random seed so the random partitioning of files is the same
        # when called for the train and test splits.
        numpy_original_state = np.random.get_state()
        np.random.seed(1234)

        for class_dir in data_dir.iterdir():
            fnames = [
                image_path
                for image_path in class_dir.iterdir()
                if image_path.name.endswith(".jpg")
            ]
            if _TRAIN_POINTS_PER_CLASS > len(fnames):
                raise ValueError(
                    f"Class {class_dir.name} has only {len(fnames)} images, fewer "
                    f"than the {_TRAIN_POINTS_PER_CLASS} required for the train split"
                )
            train_fnames = np.random.choice(fnames, _TRAIN_POINTS_PER_CLASS, replace=False)
            test_fnames = set(fnames).difference(train_fnames)
            fnames_to_emit = train_fnames if is_train_split else test_fnames

            for image_file in fnames_to_emit:
                record = {
                    "image": str(image_file),
                    "label": class_dir.name.lower(),
                }
                yield f"{class_dir.name.lower()}/{image_file.name}", record

        # Restore the global NumPy RNG to its previous state.
        np.random.set_state(numpy_original_state)
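

# Example usage, as a minimal sketch. It assumes this script is saved as
# `caltech101.py` next to `caltech-101.zip`, and a `datasets` version that
# still supports script-based loading:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("./caltech101.py")
#     print(ds["train"][0])   # {"image": <PIL.Image ...>, "label": ...}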