# foodvision_mini/app.py
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 8 13:00:08 2024
@author: firis
"""
import gradio as gr
import os
import torch
from model import create_eff_model
from timeit import default_timer as timer
class_names = ["pizza", "steak", "sushi"]

# Create the base EfficientNetB2 model and its matching image transforms
eff_model, eff_model_transform = create_eff_model()

# Load the fine-tuned weights (trained on the 20% data split) and keep the model on CPU
eff_model_state_dict = torch.load("20_percent_data_effnet1.pth", map_location=torch.device("cpu"))
eff_model.load_state_dict(eff_model_state_dict)
eff_model.to("cpu")
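# For reference, create_eff_model() lives in model.py (not shown here) and is assumed
# to follow the standard torchvision transfer-learning recipe, roughly along these
# lines (a sketch of the assumed setup, not the actual contents of model.py):
#
#   weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
#   transforms = weights.transforms()
#   model = torchvision.models.efficientnet_b2(weights=weights)
#   for param in model.parameters():
#       param.requires_grad = False
#   model.classifier = torch.nn.Sequential(
#       torch.nn.Dropout(p=0.3),
#       torch.nn.Linear(in_features=1408, out_features=len(class_names)),
#   )
#   # returns (model, transforms)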
# Prediction function: takes a PIL image, returns class probabilities and inference time
def predict(img):
    start_time = timer()

    # Apply the model's transforms and add a batch dimension
    img = eff_model_transform(img).unsqueeze(0)

    # Forward pass in inference mode, then convert logits to probabilities
    eff_model.eval()
    with torch.inference_mode():
        pred_probs = torch.softmax(eff_model(img), dim=1)

    # Map each class name to its predicted probability
    class_with_pred_dict = {cl: float(pred_probs[0][ind]) for ind, cl in enumerate(class_names)}

    pred_time = round(timer() - start_time, 5)
    return class_with_pred_dict, pred_time
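# Example of what predict() returns (values here are illustrative, not actual outputs):
#
#   from PIL import Image
#   sample_img = Image.open("examples/" + os.listdir("examples")[0])
#   preds, seconds = predict(sample_img)
#   # preds   -> e.g. {"pizza": 0.97, "steak": 0.02, "sushi": 0.01}
#   # seconds -> e.g. 0.085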
############# Gradio Interface ##########
title = "FoodVision Mini πŸ•πŸ₯©πŸ£"
description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
example_list = [["examples/" + example] for example in os.listdir("examples")]
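# Note: gr.Interface expects examples as a list of lists (one inner list per input
# component), which is why each image path above is wrapped in its own list.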
# Create the Gradio demo
demo = gr.Interface(fn=predict,  # mapping function from input to output
                    inputs=gr.Image(type="pil"),  # what are the inputs?
                    outputs=[gr.Label(num_top_classes=3, label="Predictions"),  # what are the outputs?
                             gr.Number(label="Prediction time (s)")],  # predict() returns two values, so we need two output components
                    examples=example_list,
                    title=title,
                    description=description)
# Launch the demo!
demo.launch(debug=False,  # print errors locally?
            share=True)