Datasets:
File size: 4,493 Bytes
808187d eadf52e 17acea3 808187d 17acea3 62e9177 51e6200 e774f45 808187d 3371f7f 808187d e774f45 35f32ee 808187d a014ab8 d11e825 808187d 5c728e6 d11e825 808187d 5d2aa2d 808187d 17acea3 d11e825 a014ab8 f875dbb e774f45 d11e825 15fe087 d11e825 ba7d78e f875dbb 4767376 f875dbb f6ef9d8 f875dbb 4767376 e774f45 f875dbb e774f45 3dede87 e774f45 3dede87 e774f45 c72d774 2667046 aaf8b27 2667046 3dede87 2667046 3dede87 e774f45 2667046 48f33f7 2667046 e774f45 3dede87 e774f45 3dede87 e774f45 1340d71 e774f45 a014ab8 e774f45 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 |
"""CC6204-Hackaton-Cub-Dataset: Multimodal"""
import os
import re
import datasets
import pandas as pd
from requests import get
# Configure `datasets` logging once. Debug verbosity is intentionally on so
# download/extraction progress is visible while this loading script matures.
# (The original called set_verbosity_debug() twice; once is enough.)
datasets.logging.set_verbosity_debug()
logger = datasets.logging.get_logger(__name__)

# Dataset-card metadata ("XYZ" are placeholders pending a real write-up).
_DESCRIPTION = "XYZ"
_CITATION = "XYZ"
_HOMEPAGE = "https://github.com/ivansipiran/CC6204-Deep-Learning/blob/main/Hackaton/hackaton.md"
# Base URL of the data directory inside the Hub repository.
_REPO = "https://huggingface.co/datasets/alkzar90/CC6204-Hackaton-Cub-Dataset/resolve/main/data"
# Remote resources that make up the dataset. The .txt files are small index
# files parsed at import time below; the .zip archives hold the payloads.
_URLS = {
    # One "<image_id> <0|1>" pair per line; 1 marks a training image.
    "train_test_split": f"{_REPO}/train_test_split.txt",
    # One "<class_id> <class_name>" pair per line (parsed into _ID2LABEL).
    "classes": f"{_REPO}/classes.txt",
    # One "<image_id> <class_id>" pair per line (parsed into _IMGID2CLASSID).
    "image_class_labels": f"{_REPO}/image_class_labels.txt",
    # One "<image_id> <relative/image/path>" pair per line (parsed into _IMGNAME2ID).
    "images": f"{_REPO}/images.txt",
    "image_urls": f"{_REPO}/images.zip",
    "text_urls": f"{_REPO}/text.zip",
    # Reduced image subset; currently the archive actually downloaded by
    # _split_generators (presumably for fast iteration — see class below).
    "mini_images_urls": f"{_REPO}/dummy/mini_images.zip"
}
# Create the ClassId-to-label dictionary from the classes file.
# Each non-empty line looks like "<id> <NNN>.<Label_With_Underscores>".
# The pattern must be a raw string: the original "[^\d\.\_+].+" relied on
# invalid escape sequences, which raise SyntaxWarning on Python 3.12+.
# It matches from the first character past the numeric "NNN." prefix.
_LABEL_PATTERN = re.compile(r"[^\d\._+].+")
_ID2LABEL = {}
for row in get(_URLS["classes"]).iter_lines():
    row = row.decode("UTF8")
    if row != "":
        idx, label = row.split(" ")
        # Strip the "NNN." prefix and turn underscores into spaces.
        _ID2LABEL[int(idx)] = _LABEL_PATTERN.search(label).group(0).replace("_", " ")
_NAMES = list(_ID2LABEL.values())
# Map image id (kept as a string) -> class id (int), parsed from the
# image_class_labels file ("<image_id> <class_id>" per non-empty line).
_IMGID2CLASSID = {}
for raw_line in get(_URLS["image_class_labels"]).iter_lines():
    text = raw_line.decode("UTF8")
    if text:
        image_id, class_id = text.split(" ")
        _IMGID2CLASSID[image_id] = int(class_id)
# Map image file name (basename only) -> image id (string), built from
# images.txt ("<image_id> <relative/image/path>" per non-empty line).
_IMGNAME2ID = {}
for raw_line in get(_URLS["images"]).iter_lines():
    text = raw_line.decode("UTF8")
    if text:
        image_id, image_path = text.split(" ")
        _IMGNAME2ID[os.path.basename(image_path)] = image_id
# Collect the ids of all training images. train_test_split.txt flags each
# image id with 1 = train, 0 = test. Built directly as a set (the original
# accumulated a list and converted afterwards) for O(1) membership tests.
_TRAIN_IDX_SET = set()
for row in get(_URLS["train_test_split"]).iter_lines():
    row = row.decode("UTF8")
    if row != "":
        idx, train_bool = row.split(" ")
        if train_bool == "1":
            _TRAIN_IDX_SET.add(idx)
class CubDataset(datasets.GeneratorBasedBuilder):
    """CUB bird dataset builder yielding (image, class-label) examples.

    Relies on the module-level lookup tables built at import time:
    _NAMES, _ID2LABEL, _IMGID2CLASSID, _IMGNAME2ID and _TRAIN_IDX_SET.
    """

    def _info(self):
        """Return dataset metadata: an Image feature plus a ClassLabel."""
        features = datasets.Features({
            "image": datasets.Image(),
            "labels": datasets.features.ClassLabel(names=_NAMES),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("image", "labels"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the image archive and partition files into train/test.

        NOTE(review): this downloads the "mini_images_urls" dummy subset,
        not the full "image_urls" archive — presumably intentional for quick
        iteration; confirm before a real release.
        """
        train_files, train_idx = [], []
        test_files, test_idx = [], []

        data_dir = dl_manager.download_and_extract(_URLS["mini_images_urls"])
        for img in dl_manager.iter_files(data_dir):
            # Ids stay strings so membership tests match _TRAIN_IDX_SET.
            img_idx = _IMGNAME2ID[os.path.basename(img)]
            if img_idx in _TRAIN_IDX_SET:
                train_files.append(img)
                train_idx.append(img_idx)
            else:
                test_files.append(img)
                test_idx.append(img_idx)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": train_files, "image_idx": train_idx},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files": test_files, "image_idx": test_idx},
            ),
        ]

    def _generate_examples(self, files, image_idx):
        """Yield (key, example) pairs.

        `files` and `image_idx` are parallel lists produced by
        _split_generators; iterating them with zip (instead of indexing
        image_idx by the enumerate counter) keeps the pairing explicit.
        """
        for key, (path, img_id) in enumerate(zip(files, image_idx)):
            # Skip any stray non-image files extracted from the archive.
            if os.path.basename(path).endswith(".jpg"):
                yield key, {
                    "image": path,
                    "labels": _ID2LABEL[_IMGID2CLASSID[img_id]],
                }
|