"""CC6204-Hackaton-Cub-Dataset: Multimodal"""
import os
import re
import datasets
import pandas as pd
from requests import get
logger = datasets.logging.get_logger(__name__)
datasets.logging.set_verbosity_info()
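# NOTE: the metadata files below are fetched at import time because the label
# vocabulary (_NAMES) has to exist before _info() can build the ClassLabel feature.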
_DESCRIPTION = "XYZ"
_CITATION = "XYZ"
_HOMEPAGE = "https://github.com/ivansipiran/CC6204-Deep-Learning/blob/main/Hackaton/hackaton.md"
_REPO = "https://huggingface.co/datasets/alkzar90/CC6204-Hackaton-Cub-Dataset/resolve/main/data"
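# The *.txt metadata files follow the CUB-200-2011 layout: one record per line,
# "<id> <value>", with the two fields separated by a single space.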
_URLS = {
    "train_test_split": f"{_REPO}/train_test_split.txt",
    "classes": f"{_REPO}/classes.txt",
    "image_class_labels": f"{_REPO}/image_class_labels.txt",
    "images": f"{_REPO}/images.txt",
    # A list, so that _split_generators can iterate over one or more image archives
    "image_urls": [f"{_REPO}/images.zip"],
    "text_urls": f"{_REPO}/text.zip",
}
# Create an id-to-label dictionary from the classes file. Each line pairs an
# integer id with a prefixed label (e.g. "1 001.Black_footed_Albatross");
# strip the numeric prefix and replace underscores to get a readable name.
classes = get(_URLS["classes"]).iter_lines()
logger.info(f"classes: {classes}")
_ID2LABEL = {}
for row in classes:
    row = row.decode("UTF-8")
    if row != "":
        idx, label = row.split(" ")
        _ID2LABEL[int(idx)] = re.search(r"[^\d._+].+", label).group(0).replace("_", " ")
logger.info(f"_ID2LABEL: {_ID2LABEL}")
_NAMES = list(_ID2LABEL.values())
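# The order of _NAMES fixes the integer ids that ClassLabel assigns, so it must
# follow classes.txt order (dicts preserve insertion order in Python 3.7+).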
# Build from images.txt a mapping from image file name -> image id
imgpath_to_ids = get(_URLS["images"]).iter_lines()
_IMGNAME2ID = {}
for row in imgpath_to_ids:
    row = row.decode("UTF-8")
    if row != "":
        idx, img_name = row.split(" ")
        _IMGNAME2ID[os.path.basename(img_name)] = int(idx)
# Create the set of train image ids from train_test_split.txt
train_test_split = get(_URLS["train_test_split"]).iter_lines()
_TRAIN_IDX_SET = set()
for row in train_test_split:
    row = row.decode("UTF-8")
    if row != "":
        idx, train_bool = row.split(" ")
        # 1: train, 0: test
        if train_bool == "1":
            _TRAIN_IDX_SET.add(int(idx))
class CubDataset(datasets.GeneratorBasedBuilder):
    """Cub Dataset"""

    def _info(self):
        features = datasets.Features({
            "image": datasets.Image(),
            "labels": datasets.features.ClassLabel(names=_NAMES),
        })
        keys = ("image", "labels")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=keys,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        train_files = []
        test_files = []

        # Download and extract the image archive(s), then route each image
        # into the train or test split according to _TRAIN_IDX_SET
        data_files = dl_manager.download_and_extract(_URLS["image_urls"])
        for batch in data_files:
            path_files = dl_manager.iter_files(batch)
            for img in path_files:
                if _IMGNAME2ID[os.path.basename(img)] in _TRAIN_IDX_SET:
                    train_files.append(img)
                else:
                    test_files.append(img)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": train_files,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": test_files,
                },
            ),
        ]
    def _generate_examples(self, files):
        for i, path in enumerate(files):
            file_name = os.path.basename(path)
            if file_name.endswith(".jpg"):
                # Derive the class name from the parent folder (e.g.
                # "001.Black_footed_Albatross") with the same transform used
                # for _ID2LABEL, so the string matches the ClassLabel names.
                folder = os.path.basename(os.path.dirname(path))
                label = re.search(r"[^\d._+].+", folder).group(0).replace("_", " ")
                yield i, {
                    "image": path,
                    "labels": label,
                }
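
# Usage sketch (assuming this script is hosted on the Hub under the repo id in
# _REPO; `datasets.load_dataset` downloads and runs this builder):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("alkzar90/CC6204-Hackaton-Cub-Dataset", split="train")
#   example = ds[0]  # {"image": <PIL.Image>, "labels": <int>}
#   print(ds.features["labels"].int2str(example["labels"]))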