### -------------------------------- ###
###             libraries           ###
### -------------------------------- ###
import gradio as gr
import numpy as np
import os
import tensorflow as tf

### -------------------------------- ###
###           model loading         ###
### -------------------------------- ###
model = tf.keras.models.load_model('model.h5')

### -------------------------------- ###
###      reading: categories.txt    ###
### -------------------------------- ###
# placeholder labels in case categories.txt is missing
labels = ['please upload categories.txt' for _ in range(10)]
if os.path.isfile("categories.txt"):
    # categories.txt lists the class labels, whitespace-separated, on its first line
    with open("categories.txt", "r") as categories:
        labels = categories.readline().split()

### -------------------------------- ###
###          page description       ###
### -------------------------------- ###
title = "Seefood: Hot dog or... not hot dog"
description = "A Hugging Face Spaces demo created by datasith!"
article = \
'''
#### Hot dog or not hot dog
Jian-Yang's masterpiece from the show Silicon Valley serves as a great
exercise to get familiar with Hugging Face Spaces! All the necessary files
are included for everything to run smoothly on HF's Spaces:
- app.py
- categories.txt
- model.h5 (AlexNet)
- requirements.txt
- README.md
- nay.jpg (Not-hot-dog example)
- yay.jpg (Hot-dog example)

The data used to train the model is available as a
[Kaggle dataset](https://www.kaggle.com/datasets/dansbecker/hot-dog-not-hot-dog).
The step-by-step process for generating, training, and testing the
Image Classification model is available in my
[GitHub repository](https://github.com/datasith/ds-experiments-image-classification/tree/main/hotdog-not-hotdog).

If you enjoy my work, feel free to follow me here on HF and/or on:
- [GitHub](https://github.com/datasith)
- [Kaggle](https://kaggle.com/datasith)
- [Twitter](https://twitter.com/datasith)
- [LinkedIn](https://linkedin.com/in/datasith)

Either way, enjoy!
'''

### -------------------------------- ###
###        interface creation       ###
### -------------------------------- ###
samples = ['yay.jpg', 'nay.jpg']

def preprocess(image):
    # resize to the model's expected input size and add a batch dimension
    image = tf.image.resize(image, [256, 256])
    img_array = tf.keras.utils.img_to_array(image)
    img_array = tf.expand_dims(img_array, 0)
    return img_array

def predict_image(image):
    # run inference and convert the raw outputs to per-class probabilities
    predictions = model.predict(preprocess(image))
    scores = tf.nn.softmax(predictions[0])
    results = {}
    for idx, res in enumerate(scores):
        results[labels[idx]] = float(res)
    return results

# generate image input and text label output
image = gr.inputs.Image(label="Upload Your Image Here")
label = gr.outputs.Label(num_top_classes=len(labels))

# generate and launch the interface
interface = gr.Interface(
    fn=predict_image,
    inputs=image,
    outputs=label,
    article=article,
    theme='default',
    title=title,
    allow_flagging='never',
    description=description,
    examples=samples,
)
interface.launch()