Update CC6204-Hackaton-Cub-Dataset.py
CC6204-Hackaton-Cub-Dataset.py
CHANGED
@@ -29,7 +29,6 @@ _URLS = {
 # Create id-to-label dictionary using the classes file
 classes = get(_URLS["classes"]).iter_lines()
 logger.info(f"classes: {classes}")
-
 _ID2LABEL = {}
 for row in classes:
     row = row.decode("UTF8")
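For context, classes.txt in CUB-200-2011 pairs a numeric class id with a class name on each line. Below is a minimal standalone sketch of the parsing this loop appears to perform; the loop body past this hunk is not shown in the diff, so the exact split and the integer key are assumptions, and the sample lines are illustrative:

# Sketch of the id-to-label parsing on two illustrative classes.txt lines
# (CUB-200-2011 format: "<class_id> <class_name>"). The split and int()
# handling below are assumptions; the real loop body is outside this hunk.
sample_classes = [b"1 001.Black_footed_Albatross", b"2 002.Laysan_Albatross"]

_ID2LABEL = {}
for row in sample_classes:
    row = row.decode("UTF8")
    if row != "":
        idx, label = row.split(" ")
        _ID2LABEL[int(idx)] = label

print(_ID2LABEL)  # {1: '001.Black_footed_Albatross', 2: '002.Laysan_Albatross'}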
@@ -41,6 +40,7 @@ logger.info(f"_ID2LABEL: {_ID2LABEL}")

 _NAMES = list(_ID2LABEL.values())

+
 # build from images.txt: a mapping from image_file_name -> id
 imgpath_to_ids = get(_URLS["images"]).iter_lines()
 _IMGNAME2ID = {}
@@ -49,7 +49,18 @@ for row in imgpath_to_ids:
     if row != "":
         idx, img_name = row.split(" ")
         _IMGNAME2ID[os.path.basename(img_name)] = int(idx)
-
+
+
+# Create TRAIN_IDX_SET
+train_test_split = get(_URLS["train_test_split"]).iter_lines()
+_TRAIN_IDX_SET = set()
+for row in train_test_split:
+    row = row.decode("UTF8")
+    if row != "":
+        idx, train_bool = row.split(" ")
+        # 1: train, 0: test
+        if train_bool == "1":
+            _TRAIN_IDX_SET.add(int(idx))


 class CubDataset(datasets.GeneratorBasedBuilder):
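The new module-level block mirrors the images.txt loop above it: train_test_split.txt pairs each image id with a 1 (train) or 0 (test) flag. Here is a small self-contained sketch of how the two mappings interact, using illustrative lines in the CUB-200-2011 file formats (the sample paths and ids are made up, not taken from the commit):

import os

# Illustrative lines in the CUB-200-2011 formats; not taken from the commit.
sample_images = [
    b"1 001.Black_footed_Albatross/Black_Footed_Albatross_0046_18.jpg",
    b"2 001.Black_footed_Albatross/Black_Footed_Albatross_0009_34.jpg",
]
sample_split = [b"1 0", b"2 1"]  # "<image_id> <is_training_image>"

_IMGNAME2ID = {}
for row in sample_images:
    row = row.decode("UTF8")
    if row != "":
        idx, img_name = row.split(" ")
        _IMGNAME2ID[os.path.basename(img_name)] = int(idx)

_TRAIN_IDX_SET = set()
for row in sample_split:
    row = row.decode("UTF8")
    if row != "":
        idx, train_bool = row.split(" ")
        if train_bool == "1":
            _TRAIN_IDX_SET.add(int(idx))

print(_IMGNAME2ID)    # {'Black_Footed_Albatross_0046_18.jpg': 1, 'Black_Footed_Albatross_0009_34.jpg': 2}
print(_TRAIN_IDX_SET) # {2}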
@@ -71,12 +82,7 @@ class CubDataset(datasets.GeneratorBasedBuilder):
         )


-    def _split_generators(self, dl_manager):
-        # 1: train, 0: test
-        train_test_split = get(_URLS["train_test_split"]).iter_lines()
-        train_images_idx = set([int(x.decode("UTF8").split(" ")[0]) for x in train_test_split if x.decode("UTF8").split(" ")[1] == "1"])
-        logger.info(f"train_images_idx length: {len(train_images_idx)}")
-
+    def _split_generators(self, dl_manager):
         train_files = []
         test_files = []

@@ -86,7 +92,7 @@ class CubDataset(datasets.GeneratorBasedBuilder):
         for batch in data_files:
             path_files = dl_manager.iter_files(batch)
             for img in path_files:
-                if _IMGNAME2ID[os.path.basename(img)] in train_images_idx:
+                if _IMGNAME2ID[os.path.basename(img)] in _TRAIN_IDX_SET:
                     train_files.append(img)
                 else:
                     test_files.append(img)
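The diff ends before the return statement, so how train_files and test_files are handed back is not shown here. In a typical datasets.GeneratorBasedBuilder they would feed two SplitGenerators, roughly like this hypothetical helper:

import datasets

def build_split_generators(train_files, test_files):
    # Hypothetical wiring, not taken from the commit: pass the filtered
    # file lists to the builder's example generator via gen_kwargs.
    return [
        datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_files}),
        datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"files": test_files}),
    ]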
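Once the script is on the Hub, the split logic above is what end users reach through load_dataset. A hedged usage sketch follows; the repository namespace is not shown on this page, so the repo id below is a placeholder:

from datasets import load_dataset

repo_id = "your-namespace/CC6204-Hackaton-Cub-Dataset"  # placeholder namespace
ds = load_dataset(repo_id)
print(ds)  # expected: a DatasetDict with "train" and "test" splits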