Commit 8dd0f73, committed by kamangir
Parent(s): 183bd3e

validating train - kamangir/bolt#689

Files changed:
- image_classifier/__init__.py (+1, -1)
- image_classifier/classes.py (+17, -28)
image_classifier/__init__.py
CHANGED
@@ -1,5 +1,5 @@
 name = "image_classifier"
 
-version = "1.1.
+version = "1.1.45"
 
 description = "fashion-mnist + hugging-face + awesome-bash-cli"
image_classifier/classes.py
CHANGED
@@ -1,8 +1,12 @@
 from .plot import *
 from abcli import file
+from abcli import path
 from abcli import string
 import numpy as np
 import matplotlib.pyplot as plt
+import tensorflow as tf
+from tqdm import *
+import time
 import abcli.logging
 import logging
 
@@ -60,9 +64,7 @@ class Image_Classifier(object):
 
         return True
 
-    def predict(self, test_images, test_labels, output_path="",
-        options = Options(options).default("cache", False).default("page_count", -1)
-
+    def predict(self, test_images, test_labels, output_path="", page_count=-1):
         logger.info(
             "image_classifier.predict({},{}){}".format(
                 string.pretty_size_of_matrix(test_images),
@@ -100,10 +102,6 @@ class Image_Classifier(object):
         cm = cm / np.sum(cm, axis=1)[:, np.newaxis]
         logger.debug("confusion_matrix: {}".format(cm))
 
-        if options["cache"]:
-            if not cache.write("{}.confusion_matrix".format(self.object_name), cm):
-                return False
-
         if not file.save("{}/confusion_matrix.pyndarray".format(output_path), cm):
             return False
 
@@ -147,8 +145,8 @@ class Image_Classifier(object):
             return False
 
         max_index = test_images.shape[0]
-        if
-            max_index = min(24 *
+        if page_count != -1:
+            max_index = min(24 * page_count, max_index)
         offset = int(np.max(np.array(objects.list_of_frames(output_path) + [-1]))) + 1
         logger.info(
             "image_classifier.predict(offset={}): rendering {} frame(s)...".format(
@@ -285,16 +283,9 @@ class Image_Classifier(object):
         ]
 
     @staticmethod
-    def train(data_path, model_path,
-
-
-            .default("color", False)
-            .default("convnet", True)
-            .default("epochs", 10)
-        )
-
-        classifier = image_classifier()
-        classifier.params["convnet"] = options["convnet"]
+    def train(data_path, model_path, color=False, convnet=True, epochs=10):
+        classifier = Image_Classifier()
+        classifier.params["convnet"] = convnet
 
         logger.info(
             "image_classifier.train({}) -{}> {}".format(
@@ -304,7 +295,7 @@ class Image_Classifier(object):
             )
         )
 
-        success, train_images = file.load("{}/train_images.pyndarray"
+        success, train_images = file.load(f"{data_path}/train_images.pyndarray")
         if success:
             success, train_labels = file.load(f"{data_path}/train_labels.pyndarray")
         if success:
@@ -326,14 +317,14 @@ class Image_Classifier(object):
         window_size = train_images.shape[1]
         input_shape = (
             (window_size, window_size, 3)
-            if
+            if color
             else (window_size, window_size, 1)
-            if
+            if convnet
            else (window_size, window_size)
         )
         logger.info(f"input_shape:{string.pretty_size(input_shape)}")
 
-        if
+        if convnet and not color:
             train_images = np.expand_dims(train_images, axis=3)
             test_images = np.expand_dims(test_images, axis=3)
 
@@ -343,15 +334,13 @@ class Image_Classifier(object):
         ):
             logger.info("{}: {}".format(name, string.pretty_size_of_matrix(thing)))
         logger.info(
-            "{} class(es): {}"
-                len(classifier.class_names), classifier.class_names
-            )
+            f"{len(classifier.class_names)} class(es): {', '.join(classifier.class_names)}"
         )
 
         train_images = train_images / 255.0
         test_images = test_images / 255.0
 
-        if
+        if convnet:
             # https://medium.com/swlh/convolutional-neural-networks-for-multiclass-image-classification-a-beginners-guide-to-6dbc09fabbd
             classifier.model = tf.keras.Sequential(
                 [
@@ -396,7 +385,7 @@ class Image_Classifier(object):
             metrics=["accuracy"],
         )
 
-        classifier.model.fit(train_images, train_labels, epochs=
+        classifier.model.fit(train_images, train_labels, epochs=epochs)
 
         test_accuracy = float(
             classifier.model.evaluate(test_images, test_labels, verbose=2)[1]
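For reference, a minimal sketch (not part of this commit) of how the refactored entry points might be called now that the abcli Options strings have been replaced by explicit keyword arguments. The data and model paths below are hypothetical placeholders; only the class name, module path, and parameter names (color, convnet, epochs, page_count) come from the diff above.

# Hedged usage sketch: train() is now a @staticmethod with explicit
# keyword arguments instead of an abcli Options string.
from image_classifier.classes import Image_Classifier

Image_Classifier.train(
    data_path="/data/fashion-mnist",      # hypothetical path
    model_path="/models/fashion-mnist",   # hypothetical path
    color=False,    # grayscale fashion-mnist images
    convnet=True,   # use the convolutional architecture
    epochs=10,
)

# predict() now takes page_count directly; -1 keeps the previous
# "render everything" behavior, any other value caps the rendered
# frames at 24 * page_count, e.g. (on a trained instance):
# classifier.predict(test_images, test_labels, output_path="/tmp/out", page_count=2)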