import gradio as gr
import os
import cv2
import numpy as np
import imutils
from keras.preprocessing.image import img_to_array
from keras.models import load_model
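
# Assumed dependencies (inferred from the imports above): gradio, opencv-python,
# numpy, imutils, and a TensorFlow/Keras version that still provides
# keras.preprocessing.image (newer releases moved img_to_array to keras.utils).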

# Load the pre-trained face detector and emotion classifier
detection_model_path = 'haarcascade_files/haarcascade_frontalface_default.xml'
emotion_model_path = 'model4_0.83/model4_entire_model.h5'
face_detection = cv2.CascadeClassifier(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
EMOTIONS = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt', 'unknown']
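
# Optional sanity check: EMOTIONS is assumed to mirror the training label order,
# so the model's output width should match the list length; failing fast here
# beats silently mislabelling faces at inference time.
assert emotion_classifier.output_shape[-1] == len(EMOTIONS), \
    "EMOTIONS does not match the model's output size"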


def predict_emotion(frame):
    # Gradio passes the image as an RGB numpy array; shrink it for faster detection
    frame = imutils.resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    faces = face_detection.detectMultiScale(gray, scaleFactor=1.1,
                                            minNeighbors=5, minSize=(30, 30),
                                            flags=cv2.CASCADE_SCALE_IMAGE)
    frame_clone = frame.copy()

    for (fX, fY, fW, fH) in faces:
        # Extract the face ROI from the grayscale image, resize it to the
        # fixed 48x48 input the CNN expects, and scale pixels to [0, 1]
        roi = gray[fY:fY + fH, fX:fX + fW]
        roi = cv2.resize(roi, (48, 48))
        roi = roi.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)

        preds = emotion_classifier.predict(roi)[0]
        label = EMOTIONS[preds.argmax()]

        # Draw the label and bounding box on the same copy, so both
        # annotations appear in the returned image
        cv2.putText(frame_clone, label, (fX, fY - 10),
                    cv2.FONT_HERSHEY_DUPLEX, 0.5, (238, 164, 64), 1)
        cv2.rectangle(frame_clone, (fX, fY), (fX + fW, fY + fH),
                      (238, 164, 64), 2)

    return frame_clone
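
# Quick local check (a sketch; assumes one of the bundled example images is present):
#   img = cv2.cvtColor(cv2.imread("images/chandler.jpeg"), cv2.COLOR_BGR2RGB)
#   annotated = predict_emotion(img)
#   cv2.imwrite("annotated.jpeg", cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))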


demo = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Image(type="numpy"),
    outputs=gr.Image(label="Predicted Emotion"),
    examples=[
        os.path.join(os.path.dirname(__file__), "images/chandler.jpeg"),
        os.path.join(os.path.dirname(__file__), "images/janice.jpeg"),
        os.path.join(os.path.dirname(__file__), "images/joey.jpeg"),
        os.path.join(os.path.dirname(__file__), "images/phoebe.jpeg"),
        os.path.join(os.path.dirname(__file__), "images/rachel_monica.jpeg"),
        os.path.join(os.path.dirname(__file__), "images/ross.jpeg"),
        os.path.join(os.path.dirname(__file__), "images/gunther.jpeg"),
    ],
    title="Whatchu feeling?",
    theme="shivi/calm_seafoam",
)


if __name__ == "__main__":
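    # demo.launch(share=True) would additionally create a temporary public
    # URL (a standard Gradio option) for sharing the demo without deploying.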
    demo.launch()