brxerq commited on
Commit
0c36783
·
verified ·
1 Parent(s): e140ef8

Update anti_spoofing.py

Browse files
Files changed (1) hide show
  1. anti_spoofing.py +164 -33
anti_spoofing.py CHANGED
@@ -1,15 +1,51 @@
1
- import gradio as gr
2
  import cv2
3
  import dlib
4
  import numpy as np
 
 
 
5
  from skimage import feature
 
6
 
7
- # Initialize your AntiSpoofingSystem class as previously defined
8
  class AntiSpoofingSystem:
9
  def __init__(self):
10
  self.detector = dlib.get_frontal_face_detector()
 
 
 
 
11
  self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  self.EAR_THRESHOLD = 0.25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
  def calculate_ear(self, eye):
15
  A = np.linalg.norm(eye[1] - eye[5])
@@ -25,42 +61,137 @@ class AntiSpoofingSystem:
25
  lbp_hist /= (lbp_hist.sum() + 1e-5)
26
  return np.sum(lbp_hist[:10]) > 0.3
27
 
28
- def process_image(self, image):
29
- # Convert the image to grayscale and detect faces
30
- gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
31
- faces = self.detector(gray)
32
 
33
- # If no face is detected, return a message
34
- if len(faces) == 0:
35
- return "No face detected. Please try again."
 
 
 
36
 
37
- # Process the first detected face
38
- face = faces[0]
39
- landmarks = self.predictor(gray, face)
40
- leftEye = np.array([(landmarks.part(n).x, landmarks.part(n).y) for n in range(36, 42)])
41
- rightEye = np.array([(landmarks.part(n).x, landmarks.part(n).y) for n in range(42, 48)])
42
-
43
- ear_left = self.calculate_ear(leftEye)
44
- ear_right = self.calculate_ear(rightEye)
 
 
 
 
45
 
46
- # Determine if a blink is detected
47
- blink_detected = (ear_left < self.EAR_THRESHOLD and ear_right < self.EAR_THRESHOLD)
48
- return "Blink detected!" if blink_detected else "No blink detected."
 
 
 
 
49
 
50
- # Define the Gradio interface
51
- anti_spoofing_system = AntiSpoofingSystem()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
 
53
- def detect_blink(image):
54
- result = anti_spoofing_system.process_image(image)
55
- return result
 
 
 
 
56
 
57
- iface = gr.Interface(
58
- fn=detect_blink,
59
- inputs=gr.Image(shape=(720, 1280)),
60
- outputs="text",
61
- title="Anti-Spoofing Detection System",
62
- description="Upload an image with a face to detect if a blink is detected."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
  )
64
 
65
- # Launch the Gradio interface
66
- iface.launch()
 
 
 
1
  import cv2
2
  import dlib
3
  import numpy as np
4
+ import os
5
+ import time
6
+ import mediapipe as mp
7
  from skimage import feature
8
+ import gradio as gr
9
 
 
10
  class AntiSpoofingSystem:
11
  def __init__(self):
12
  self.detector = dlib.get_frontal_face_detector()
13
+ self.anti_spoofing_completed = False
14
+ self.blink_count = 0
15
+ self.image_captured = False
16
+ self.captured_image = None
17
  self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
18
+
19
+ self.mp_hands = mp.solutions.hands
20
+ self.hands = self.mp_hands.Hands(static_image_mode=False, max_num_hands=1, min_detection_confidence=0.7)
21
+
22
+ self.cap = cv2.VideoCapture(0)
23
+ self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
24
+ self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
25
+
26
+ self.save_directory = "Person"
27
+ if not os.path.exists(self.save_directory):
28
+ os.makedirs(self.save_directory)
29
+
30
+ self.net_smartphone = cv2.dnn.readNet('yolov4.weights', 'Pretrained_yolov4 (1).cfg')
31
+ with open('PreTrained_coco.names', 'r') as f:
32
+ self.classes_smartphone = f.read().strip().split('\n')
33
+
34
  self.EAR_THRESHOLD = 0.25
35
+ self.BLINK_CONSEC_FRAMES = 4
36
+
37
+ self.left_eye_state = False
38
+ self.right_eye_state = False
39
+ self.left_blink_counter = 0
40
+ self.right_blink_counter = 0
41
+
42
+ self.smartphone_detected = False
43
+ self.smartphone_detection_frame_interval = 30
44
+ self.frame_count = 0
45
+
46
+ # New attributes for student data
47
+ self.student_id = None
48
+ self.student_name = None
49
 
50
  def calculate_ear(self, eye):
51
  A = np.linalg.norm(eye[1] - eye[5])
 
61
  lbp_hist /= (lbp_hist.sum() + 1e-5)
62
  return np.sum(lbp_hist[:10]) > 0.3
63
 
64
+ def detect_hand_gesture(self, frame):
65
+ results = self.hands.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
66
+ return results.multi_hand_landmarks is not None
 
67
 
68
    def detect_smartphone(self, frame):
        """Run YOLO cell-phone detection on every Nth frame.

        Sets self.smartphone_detected, resets the per-eye blink counters,
        and draws a box + label on *frame* when a phone is found.
        """
        # The DNN pass is expensive, so it only runs every
        # `smartphone_detection_frame_interval` frames.
        if self.frame_count % self.smartphone_detection_frame_interval == 0:
            blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
            self.net_smartphone.setInput(blob)
            output_layers_names = self.net_smartphone.getUnconnectedOutLayersNames()
            detections = self.net_smartphone.forward(output_layers_names)

            for detection in detections:
                for obj in detection:
                    # Layout per YOLO output row: [cx, cy, w, h, obj, class scores...]
                    scores = obj[5:]
                    class_id = np.argmax(scores)
                    confidence = scores[class_id]
                    if confidence > 0.5 and self.classes_smartphone[class_id] == 'cell phone':
                        # Coordinates are normalized; scale to pixel box.
                        center_x = int(obj[0] * frame.shape[1])
                        center_y = int(obj[1] * frame.shape[0])
                        width = int(obj[2] * frame.shape[1])
                        height = int(obj[3] * frame.shape[0])
                        left = int(center_x - width / 2)
                        top = int(center_y - height / 2)

                        cv2.rectangle(frame, (left, top), (left + width, top + height), (0, 0, 255), 2)
                        cv2.putText(frame, 'Smartphone Detected', (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

                        # A visible phone voids any blink progress.
                        self.smartphone_detected = True
                        self.left_blink_counter = 0
                        self.right_blink_counter = 0
                        # NOTE(review): this early return skips the
                        # frame_count increment below, so the DNN re-runs
                        # on every frame while a phone stays visible —
                        # confirm that is intended.
                        return

        self.frame_count += 1
        # NOTE(review): the flag is cleared on every frame where no phone
        # was found, including the skipped (non-DNN) frames, so a detection
        # persists for only one frame — confirm that is intended.
        self.smartphone_detected = False
98
+
99
+ def detect_blink(self, left_ear, right_ear):
100
+ if self.smartphone_detected:
101
+ self.left_eye_state = False
102
+ self.right_eye_state = False
103
+ self.left_blink_counter = 0
104
+ self.right_blink_counter = 0
105
+ return False
106
+
107
+ if left_ear < self.EAR_THRESHOLD:
108
+ if not self.left_eye_state:
109
+ self.left_eye_state = True
110
+ else:
111
+ if self.left_eye_state:
112
+ self.left_eye_state = False
113
+ self.left_blink_counter += 1
114
 
115
+ if right_ear < self.EAR_THRESHOLD:
116
+ if not self.right_eye_state:
117
+ self.right_eye_state = True
118
+ else:
119
+ if self.right_eye_state:
120
+ self.right_eye_state = False
121
+ self.right_blink_counter += 1
122
 
123
+ if self.left_blink_counter > 0 and self.right_blink_counter > 0:
124
+ self.left_blink_counter = 0
125
+ self.right_blink_counter = 0
126
+ return True
127
+ else:
128
+ return False
129
+
130
    def run(self):
        """Read one webcam frame, run the anti-spoofing checks, and return
        the annotated frame (or None if the camera produced no frame).

        Side effects: updates blink state/count, may capture and save an
        image once 5 blinks are seen, and draws status text on the frame.
        """
        ret, frame = self.cap.read()
        if not ret:
            return None

        # Detect smartphone in the frame
        self.detect_smartphone(frame)

        if self.smartphone_detected:
            cv2.putText(frame, "Mobile phone detected, can't record attendance", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        else:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = self.detector(gray)

            for face in faces:
                landmarks = self.predictor(gray, face)
                # dlib 68-point model: indices 36-41 are the left eye,
                # 42-47 the right eye.
                leftEye = np.array([(landmarks.part(n).x, landmarks.part(n).y) for n in range(36, 42)])
                rightEye = np.array([(landmarks.part(n).x, landmarks.part(n).y) for n in range(42, 48)])

                ear_left = self.calculate_ear(leftEye)
                ear_right = self.calculate_ear(rightEye)

                if self.detect_blink(ear_left, ear_right):
                    self.blink_count += 1
                cv2.putText(frame, f"Blink Count: {self.blink_count}", (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

            # Check if conditions for image capture are met
            if self.blink_count >= 5 and not self.image_captured:
                # Capture the image and reset blink count
                self.save_image(frame)
                self.blink_count = 0
                self.image_captured = True

        return frame
164
+
165
+ def save_image(self, frame):
166
+ timestamp = int(time.time())
167
+ image_name = f"captured_{timestamp}.png"
168
+ cv2.imwrite(os.path.join(self.save_directory, image_name), frame)
169
+ self.captured_image = frame
170
+ print(f"Image captured and saved as {image_name}")
171
+
172
+ def run_gradio(self):
173
+ # Capture a frame, perform anti-spoofing, and return the result
174
+ frame = self.run()
175
+ if frame is None:
176
+ return "No frame detected", None
177
+
178
+ # Convert frame for Gradio to display
179
+ _, buffer = cv2.imencode('.png', frame)
180
+ return "Anti-spoofing check completed", buffer.tobytes()
181
+
182
# Initialize the anti-spoofing system
# NOTE(review): constructing this at import time opens the webcam and loads
# the model files as a module side effect — confirm that is acceptable.
anti_spoofing_system = AntiSpoofingSystem()

# Define the Gradio interface
# live=True makes Gradio poll run_gradio repeatedly, which pulls fresh
# frames from the webcam; there are no user inputs.
interface = gr.Interface(
    fn=anti_spoofing_system.run_gradio,
    inputs=[],
    outputs=["text", "image"],
    live=True,
    title="Anti-Spoofing System",
    description="This application performs an anti-spoofing check."
)

# Launch the interface
if __name__ == "__main__":
    interface.launch()