Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -76,8 +76,8 @@
|
|
76 |
# demo.launch()
|
77 |
|
78 |
######################################################################################################################################################
|
79 |
-
|
80 |
import gradio as gr
|
|
|
81 |
import cv2
|
82 |
import numpy as np
|
83 |
import imutils
|
@@ -91,6 +91,7 @@ face_detection = cv2.CascadeClassifier(detection_model_path)
|
|
91 |
emotion_classifier = load_model(emotion_model_path, compile=False)
|
92 |
EMOTIONS = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt', 'unknown']
|
93 |
|
|
|
94 |
def predict_emotion(frame):
|
95 |
frame = imutils.resize(frame, width=300)
|
96 |
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
|
@@ -112,23 +113,40 @@ def predict_emotion(frame):
|
|
112 |
(238, 164, 64), 2)
|
113 |
return frame
|
114 |
|
115 |
-
# Define
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
76 |
# demo.launch()
|
77 |
|
78 |
######################################################################################################################################################
|
|
|
79 |
import gradio as gr
|
80 |
+
import os
|
81 |
import cv2
|
82 |
import numpy as np
|
83 |
import imutils
|
|
|
91 |
emotion_classifier = load_model(emotion_model_path, compile=False)
|
92 |
EMOTIONS = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt', 'unknown']
|
93 |
|
94 |
+
# Define a function to process each frame for emotion prediction
|
95 |
def predict_emotion(frame):
|
96 |
frame = imutils.resize(frame, width=300)
|
97 |
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
|
|
|
113 |
(238, 164, 64), 2)
|
114 |
return frame
|
115 |
|
116 |
+
# Define a function to process video input and output
def process_video(input_video_path, output_video_path):
    """Annotate every frame of a video with emotion predictions.

    Reads ``input_video_path`` frame by frame, overlays emotion labels via
    :func:`predict_emotion`, and writes the annotated frames to
    ``output_video_path`` using the XVID codec.

    Parameters
    ----------
    input_video_path : str
        Path to the source video file.
    output_video_path : str
        Path the annotated video is written to.

    Returns
    -------
    str
        ``output_video_path`` — returned so Gradio's ``outputs="video"``
        can display the result (the previous version returned ``None``,
        which left the output component with nothing to show).

    Raises
    ------
    ValueError
        If the input video cannot be opened.
    """
    cap = cv2.VideoCapture(input_video_path)
    if not cap.isOpened():
        raise ValueError(f"Could not open video: {input_video_path}")

    # Mirror the source's geometry and frame rate in the output writer.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    out = cv2.VideoWriter(output_video_path, cv2.VideoWriter_fourcc(*'XVID'), fps, (width, height))

    try:
        # Process each frame until the stream is exhausted.
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            frame_with_emotion = predict_emotion(frame)
            # predict_emotion() resizes the frame to width=300 (imutils.resize);
            # VideoWriter silently drops frames whose size differs from the one
            # the writer was opened with, so scale back to the source size.
            out.write(cv2.resize(frame_with_emotion, (width, height)))
    finally:
        # Release capture and writer even if prediction fails mid-stream.
        cap.release()
        out.release()

    return output_video_path
|
138 |
+
|
139 |
+
# Define the Gradio interface.
# NOTE(review): fn=process_video takes (input_video_path, output_video_path),
# so the two inputs below map positionally onto those parameters, and
# outputs="video" expects fn to RETURN a playable path — confirm process_video
# actually returns one; as written it returns None.
# NOTE(review): capture_session and theme="huggingface" look like Gradio 2.x
# options; capture_session was removed and theme takes a Theme object in
# Gradio 3+ — verify against the pinned gradio version.
demo = gr.Interface(
    fn=process_video,
    inputs=["video", "file"],  # Allow video input from webcam or file
    outputs="video",  # Output video with emotion overlay
    capture_session=True,  # Maintain capture session for video input
    title="Emotion Detection in Video",
    description="Upload a video file or use your webcam to detect emotions in real-time.",
    theme="huggingface",
)
|
149 |
+
|
150 |
+
# Launch the Gradio interface only when executed as a script,
# so importing this module (e.g. by the Spaces runtime) has no side effect.
if __name__ == "__main__":
    demo.launch()
|