import os

import cv2
import gradio as gr
import numpy as np
from mtcnn import MTCNN
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import img_to_array

# Emotion labels in the order the classifier outputs them
emotions = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt', 'unknown']

classifier = load_model("model_9.keras")
face_detector_mtcnn = MTCNN()

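# MTCNN.detect_faces expects an RGB image and returns one dict per detected face,
# with 'box' ([x, y, width, height]), 'confidence', and 'keypoints'.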
def predict_emotion(image):
    # gr.Image(type="numpy") hands the function an RGB array, which is what MTCNN expects
    faces = face_detector_mtcnn.detect_faces(image)

    if not faces:
        # No face detected: return zero confidence for every label
        return {emotion: 0.0 for emotion in emotions}

    # Classify the most confident detection rather than silently keeping the last one
    face = max(faces, key=lambda f: f['confidence'])
    x, y, w, h = face['box']
    x, y = max(0, x), max(0, y)  # MTCNN boxes can start slightly outside the image

    roi = image[y:y + h, x:x + w]

    # The classifier expects a 48x48 grayscale crop scaled to [0, 1]
    roi_gray = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)
    roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)

    img = roi_gray.astype('float32') / 255.0
    img = img_to_array(img)            # (48, 48) -> (48, 48, 1)
    img = np.expand_dims(img, axis=0)  # add the batch dimension -> (1, 48, 48, 1)

    prediction = classifier.predict(img)[0]

    confidences = {emotions[i]: float(prediction[i]) for i in range(len(emotions))}
    return confidences
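
# Quick local sanity check (a hypothetical snippet; assumes one of the example
# images, e.g. images/Image_1.jpg, is present next to this script):
#   from PIL import Image
#   sample = np.array(Image.open("images/Image_1.jpg").convert("RGB"))
#   print(predict_emotion(sample))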

demo = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Image(type="numpy"),
    outputs=gr.Label(num_top_classes=9),
    examples=[
        os.path.join(os.path.dirname(__file__), "images/Image_1.jpg"),
        os.path.join(os.path.dirname(__file__), "images/Image_2.jpg"),
        os.path.join(os.path.dirname(__file__), "images/Image_3.jpg"),
        os.path.join(os.path.dirname(__file__), "images/Image_4.jpg"),
        os.path.join(os.path.dirname(__file__), "images/Image_5.jpg"),
        os.path.join(os.path.dirname(__file__), "images/Image_6.jpg"),
        os.path.join(os.path.dirname(__file__), "images/Image_7.jpg"),
        os.path.join(os.path.dirname(__file__), "images/Image_8.jpg"),
        os.path.join(os.path.dirname(__file__), "images/Image_9.jpg"),
        os.path.join(os.path.dirname(__file__), "images/Image_10.jpg"),
    ],
    title="Whatchu feeling?",
    theme="shivi/calm_seafoam",
)

if __name__ == "__main__":
    demo.launch()
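    # Passing share=True to launch() would also create a temporary public link when running locally.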