23A475R committed on
Commit
320484d
1 Parent(s): fbe49c2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -12
app.py CHANGED
@@ -17,6 +17,42 @@ EMOTIONS = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', '
17
  # face_detector_mtcnn = MTCNN()
18
  classifier = load_model(emotion_model_path)
19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  def predict_emotion(frame):
21
  frame = imutils.resize(frame, width=300)
22
  gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
@@ -24,12 +60,7 @@ def predict_emotion(frame):
24
  minNeighbors=5, minSize=(30, 30),
25
  flags=cv2.CASCADE_SCALE_IMAGE)
26
 
27
- frame_clone = frame.copy()
28
- if len(faces) > 0:
29
- faces = sorted(faces, reverse=True,
30
- key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
31
- (fX, fY, fW, fH) = faces
32
-
33
  # Extract the ROI of the face from the grayscale image, resize it to a fixed 28x28 pixels, and then prepare
34
  # the ROI for classification via the CNN
35
  roi = gray[fY:fY + fH, fX:fX + fW]
@@ -42,15 +73,12 @@ def predict_emotion(frame):
42
  label = EMOTIONS[preds.argmax()]
43
 
44
  # Overlay a box over the detected face
45
- cv2.putText(frame_clone, label, (fX, fY + 10),
46
  cv2.FONT_HERSHEY_DUPLEX, 1, (238, 164, 64), 1)
47
- cv2.rectangle(frame_clone, (fX, fY), (fX + fW, fY + fH),
48
  (238, 164, 64), 2)
49
 
50
- else:
51
- label = "Can't find your face"
52
-
53
- return frame_clone
54
 
55
 
56
 
 
17
  # face_detector_mtcnn = MTCNN()
18
  classifier = load_model(emotion_model_path)
19
 
20
+ # def predict_emotion(frame):
21
+ # frame = imutils.resize(frame, width=300)
22
+ # gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
23
+ # faces = face_detection.detectMultiScale(gray, scaleFactor=1.1,
24
+ # minNeighbors=5, minSize=(30, 30),
25
+ # flags=cv2.CASCADE_SCALE_IMAGE)
26
+
27
+ # frame_clone = frame.copy()
28
+ # if len(faces) > 0:
29
+ # faces = sorted(faces, reverse=True,
30
+ # key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
31
+ # (fX, fY, fW, fH) = faces
32
+
33
+ # # Extract the ROI of the face from the grayscale image, resize it to a fixed 28x28 pixels, and then prepare
34
+ # # the ROI for classification via the CNN
35
+ # roi = gray[fY:fY + fH, fX:fX + fW]
36
+ # roi = cv2.resize(roi, (48, 48))
37
+ # roi = roi.astype("float") / 255.0
38
+ # roi = img_to_array(roi)
39
+ # roi = np.expand_dims(roi, axis=0)
40
+
41
+ # preds = emotion_classifier.predict(roi)[0]
42
+ # label = EMOTIONS[preds.argmax()]
43
+
44
+ # # Overlay a box over the detected face
45
+ # cv2.putText(frame_clone, label, (fX, fY + 100),
46
+ # cv2.FONT_HERSHEY_DUPLEX, 1, (238, 164, 64), 1)
47
+ # cv2.rectangle(frame_clone, (fX, fY), (fX + fW, fY + fH),
48
+ # (238, 164, 64), 2)
49
+
50
+ # else:
51
+ # label = "Can't find your face"
52
+
53
+ # return frame_clone
54
+
55
+
56
  def predict_emotion(frame):
57
  frame = imutils.resize(frame, width=300)
58
  gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
 
60
  minNeighbors=5, minSize=(30, 30),
61
  flags=cv2.CASCADE_SCALE_IMAGE)
62
 
63
+ for (fX, fY, fW, fH) in faces:
 
 
 
 
 
64
  # Extract the ROI of the face from the grayscale image, resize it to a fixed 28x28 pixels, and then prepare
65
  # the ROI for classification via the CNN
66
  roi = gray[fY:fY + fH, fX:fX + fW]
 
73
  label = EMOTIONS[preds.argmax()]
74
 
75
  # Overlay a box over the detected face
76
+ cv2.putText(frame, label, (fX, fY + 100),
77
  cv2.FONT_HERSHEY_DUPLEX, 1, (238, 164, 64), 1)
78
+ cv2.rectangle(frame, (fX, fY), (fX + fW, fY + fH),
79
  (238, 164, 64), 2)
80
 
81
+ return frame
 
 
 
82
 
83
 
84