Spaces: abrar-adnan (Running)
abrar-adnan committed • Commit 961c5fc
1 Parent(s): 4d6bb26
Update app.py

app.py CHANGED
@@ -6,15 +6,32 @@ from fastai.vision.all import load_learner
 import time
 import chardet
 import base64
+from deepface import DeepFace
 
 # import pathlib
 # temp = pathlib.PosixPath
 # pathlib.PosixPath = pathlib.WindowsPath
 
+backends = [
+    'opencv',
+    'ssd',
+    'dlib',
+    'mtcnn',
+    'retinaface',
+    'mediapipe'
+]
 
 model = load_learner("gaze-recognizer-v3.pkl")
 
 def video_processing(video_file, encoded_video):
+    angry = 0
+    disgust = 0
+    fear = 0
+    happy = 0
+    sad = 0
+    surprise = 0
+    neutral = 0
+    emotion_count = 0
 
     if encoded_video != "":
 
@@ -45,6 +62,8 @@ def video_processing(video_file, encoded_video):
 
         # Convert the frame to grayscale (the gaze model works on grayscale input)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+
 
 
        # Find all the faces in the frame using a pre-trained convolutional neural network.
@@ -56,10 +75,17 @@ def video_processing(video_file, encoded_video):
        for top, right, bottom, left in face_locations:
            # cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            face_image = gray[top:bottom, left:right]
+            color_image = frame[top:bottom, left:right]
 
            # Resize the face image to the desired size
            resized_face_image = cv2.resize(face_image, (128, 128))
 
+            try:
+                emotion = DeepFace.analyze(color_image, actions=['emotion'], detector_backend=backends[2], enforce_detection=False)  # backends 2, 3 and 4 (dlib, mtcnn, retinaface) work
+                emotion_count += 1
+            except Exception:
+                pass
+
            # Predict the class of the resized face image using the model
            result = model.predict(resized_face_image)
            print(result[0])
@@ -83,7 +109,30 @@ def video_processing(video_file, encoded_video):
    if os.path.exists("temp_video.mp4"):
        os.remove("temp_video.mp4")
    print(gaze_percentage)
-
+
+    angry = angry / emotion_count
+    disgust = disgust / emotion_count
+    fear = fear / emotion_count
+    happy = happy / emotion_count
+    sad = sad / emotion_count
+    surprise = surprise / emotion_count
+    neutral = neutral / emotion_count
+
+    angry = f'total anger percentage = {angry}'
+    disgust = f'total disgust percentage = {disgust}'
+    fear = f'total fear percentage = {fear}'
+    happy = f'total happy percentage = {happy}'
+    sad = f'total sad percentage = {sad}'
+    surprise = f'total surprise percentage = {surprise}'
+    neutral = f'total neutral percentage = {neutral}'
+    print(angry)
+    print(disgust)
+    print(fear)
+    print(happy)
+    print(sad)
+    print(surprise)
+    print(neutral)
+    return "\n".join([str(gaze_percentage), angry, disgust, fear, happy, sad, surprise, neutral])
 
 
 demo = gr.Interface(fn=video_processing,
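Note that while this commit initializes and averages the per-emotion counters, none of the hunks shown ever adds DeepFace's per-frame scores into angry, disgust, etc., so the averages stay at 0. A minimal sketch of how that tally might look, assuming DeepFace's documented output for actions=['emotion'] (an 'emotion' mapping of label to score, wrapped in a list of per-face results in newer releases); the helper name tally_emotions and the totals dict are hypothetical, not part of the commit:

from deepface import DeepFace

def tally_emotions(face_bgr, totals, backend='dlib'):
    # Accumulate DeepFace's per-emotion scores for one BGR face crop into
    # `totals`, a dict like {'angry': 0.0, ..., 'neutral': 0.0, 'count': 0}.
    result = DeepFace.analyze(face_bgr, actions=['emotion'],
                              detector_backend=backend,
                              enforce_detection=False)
    if isinstance(result, list):  # newer DeepFace returns one entry per face
        result = result[0]
    for label, score in result['emotion'].items():
        totals[label] = totals.get(label, 0.0) + score
    totals['count'] = totals.get('count', 0) + 1
    return totals

Called once per detected face inside the frame loop, this keeps the running sums that the averaging block at the end of video_processing expects.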
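One edge case the averaging block does not cover: if DeepFace never succeeds on any frame, emotion_count stays 0 and the divisions raise ZeroDivisionError. A small sketch of a guard, again hypothetical (the helper name average_emotions is not in the commit):

def average_emotions(totals):
    # Return per-emotion averages; guard against a zero count so a video
    # with no analyzable faces does not raise ZeroDivisionError.
    count = totals.get('count', 0)
    if not count:
        return {}
    return {label: score / count
            for label, score in totals.items() if label != 'count'}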