Update app.py
app.py CHANGED
@@ -1,30 +1,23 @@
 import gradio as gr
 import os
-import keras
-from keras.preprocessing.image import img_to_array
-import imutils
 import cv2
-from keras.models import load_model
 import numpy as np
+import imutils
+from keras.preprocessing.image import img_to_array
+from keras.models import load_model
 
-#
+# Load the pre-trained models and define parameters
 detection_model_path = 'haarcascade_files/haarcascade_frontalface_default.xml'
-# emotion_model_path = 'model2/model2_entire_model.h5'
 emotion_model_path = 'model_2_aug_nocall_BEST/model_2_aug_nocall_entire_model.h5'
-
-
-
-# hyper-parameters for bounding boxes shape
-# loading models
 face_detection = cv2.CascadeClassifier(detection_model_path)
 emotion_classifier = load_model(emotion_model_path, compile=False)
-EMOTIONS = ['neutral','happiness','surprise','sadness','anger','disgust','fear','contempt','unknown']
-
+EMOTIONS = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt', 'unknown']
 
+# Function to predict emotions from a frame
 def predict(frame_or_path):
-
-
-
+    if isinstance(frame_or_path, np.ndarray):  # If input is a webcam frame
+        frame = imutils.resize(frame_or_path, width=300)
+    else:  # If input is a file path
         frame = cv2.imread(frame_or_path)
     if frame is None:
         return None, "Error: Unable to read image or video."
@@ -48,65 +41,15 @@ def predict(frame_or_path):
                   (238, 164, 64), 2)
     return frame, {emotion: float(prob) for emotion, prob in zip(EMOTIONS, preds)}
 
+# Define input and output components for Gradio
+image_input = gr.inputs.Image(sources=["webcam", "upload"], label="Your face")
+image_output = gr.outputs.Image(label="Predicted Emotion")
+label_output = gr.outputs.Label(num_top_classes=2, label="Top 2 Probabilities")
 
-
-# # Define Gradio input and output components
-# image_input = gr.components.Image(type='numpy', label="Upload Image or Video")
-# image_output = gr.components.Image(label="Predicted Emotion")
-# label_output = gr.components.Label(num_top_classes=2, label="Top 2 Probabilities")
-
-# inp = [
-#     gr.components.Image(sources="webcam", label="Your face"),
-#     gr.components.File(label="Upload Image or Video")
-# ]
-# out = [
-#     gr.components.Image(label="Predicted Emotion"),
-#     gr.components.Label(num_top_classes=2, label="Top 2 Probabilities")
-# ]
-
-
-inp = [
-    gr.components.Image(sources="webcam", label="Your face"),
-    gr.components.File(label="Upload Image or Video")
-]
-out = [
-    gr.components.Image(label="Predicted Emotion"),
-    gr.components.Label(num_top_classes=2, label="Top 2 Probabilities")
-]
-
-example_images = [
-    [
-        os.path.join(os.path.dirname(__file__), "images/chandler.jpeg"),
-        os.path.join(os.path.dirname(__file__), "images/janice.jpeg"),
-        os.path.join(os.path.dirname(__file__), "images/joey.jpeg"),
-        os.path.join(os.path.dirname(__file__), "images/phoebe.jpeg"),
-        os.path.join(os.path.dirname(__file__), "images/rachel_monica.jpeg"),
-        os.path.join(os.path.dirname(__file__), "images/ross.jpeg"),
-        os.path.join(os.path.dirname(__file__), "images/gunther.jpeg")
-    ]
-]
-# example_images = [
-#     ["images/chandler.jpeg"],
-#     ["images/janice.jpg"],
-#     ["images/joey.jpg"],
-#     ["images/phoebe.jpg"],
-#     ["images/rachel_monica.jpg"],
-#     ["images/ross.jpg"],
-#     ["images/gunther.jpg"]
-# ]
-
+# Launch the Gradio interface
 title = "Facial Emotion Recognition"
-description = "How well can this model predict your emotions? Take a picture with your webcam, and it will guess if"
-    " you are: happy, sad, angry, disgusted, scared, surprised, or neutral."
+description = "How well can this model predict your emotions? Take a picture with your webcam, or upload an image, and it will guess if you are happy, sad, angry, disgusted, scared, surprised, or neutral."
 thumbnail = "https://raw.githubusercontent.com/gradio-app/hub-emotion-recognition/master/thumbnail.png"
 
-
-
-gr.Interface(fn=predict, inputs=inp, outputs=out,
-             examples=example_images,title=title, thumbnail=thumbnail).launch()
-
-
-
-# # Launch Gradio interface
-# gr.Interface(fn=predict, inputs=image_input, outputs=[image_output, label_output],
-#              title="Facial Emotion Recognition", description="How well can this model predict your emotions?").launch()
+gr.Interface(fn=predict, inputs=image_input, outputs=[image_output, label_output],
+             title=title, description=description, thumbnail=thumbnail).launch()
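Note: the hunk context elides the body of predict() between the input handling and the final return. For readers following the diff, a minimal sketch of what typically sits there, inferred from the visible names (face_detection, emotion_classifier, preds, and the (238, 164, 64) rectangle color); the 64x64 input size and the detectMultiScale parameters are assumptions, not taken from this commit:

    # Sketch only: detect the largest face, classify its emotion, draw a box.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_detection.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    if len(faces) > 0:
        # Pick the largest detected face by bounding-box area
        x, y, w, h = sorted(faces, key=lambda f: f[2] * f[3])[-1]
        # Resize the face ROI to the classifier's input size (64x64 assumed)
        roi = cv2.resize(gray[y:y + h, x:x + w], (64, 64)).astype("float") / 255.0
        roi = np.expand_dims(img_to_array(roi), axis=0)
        preds = emotion_classifier.predict(roi)[0]
        cv2.rectangle(frame, (x, y), (x + w, y + h),
                      (238, 164, 64), 2)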
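One subtlety in the new input branch: Gradio delivers webcam captures to predict() as RGB numpy arrays, while cv2.imread() returns BGR, so the two paths feed the Haar cascade with different channel orders. A minimal sketch of a normalizing helper; load_frame is a hypothetical name, not part of this commit:

    import cv2
    import imutils
    import numpy as np

    def load_frame(frame_or_path):
        # Webcam captures arrive as numpy arrays (RGB, per Gradio's convention);
        # convert to BGR so both paths match what cv2.imread() produces.
        if isinstance(frame_or_path, np.ndarray):
            frame = cv2.cvtColor(frame_or_path, cv2.COLOR_RGB2BGR)
            return imutils.resize(frame, width=300)
        # File paths go through OpenCV directly; returns None if unreadable.
        return cv2.imread(frame_or_path)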
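Worth flagging: gr.inputs and gr.outputs are the legacy component namespaces (deprecated in Gradio 3, removed in Gradio 4), and combining them with the newer sources= keyword may not run as written. A sketch of the same wiring with top-level components, assuming a Gradio 4.x runtime; whether the Space's pinned Gradio version accepts it is untested here:

    import gradio as gr

    # With type="numpy", both webcam and upload arrive as arrays, so predict()'s
    # file-path branch would go unused under this wiring.
    image_input = gr.Image(sources=["webcam", "upload"], type="numpy", label="Your face")
    image_output = gr.Image(label="Predicted Emotion")
    label_output = gr.Label(num_top_classes=2, label="Top 2 Probabilities")

    gr.Interface(fn=predict, inputs=image_input,
                 outputs=[image_output, label_output],
                 title=title, description=description).launch()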