import cv2
import dlib
import numpy as np
import os
import time
import mediapipe as mp
from skimage import feature
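
# Liveness pipeline built from four cues: an EAR-based blink check on dlib's
# 68 facial landmarks, a MediaPipe hand-presence gesture, a YOLOv4 pass that
# rejects frames containing a visible smartphone, and an LBP texture test
# against printed-photo attacks.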

class AntiSpoofingSystem:
    def __init__(self):
        self.detector = dlib.get_frontal_face_detector()
        self.anti_spoofing_completed = False
        self.blink_count = 0
        self.image_captured = False
        self.captured_image = None
        self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

        self.mp_hands = mp.solutions.hands
        self.hands = self.mp_hands.Hands(static_image_mode=False, max_num_hands=1, min_detection_confidence=0.7)

        self.cap = cv2.VideoCapture(0)
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

        self.save_directory = "Person"
        if not os.path.exists(self.save_directory):
            os.makedirs(self.save_directory)

        # YOLOv4 weights/config and the COCO label list must exist at these
        # paths; 'cell phone' is one of the 80 COCO classes.
        self.net_smartphone = cv2.dnn.readNet('yolov4.weights', 'Pretrained_yolov4 (1).cfg')
        with open('PreTrained_coco.names', 'r') as f:
            self.classes_smartphone = f.read().strip().split('\n')

        self.EAR_THRESHOLD = 0.25     # eye counts as closed below this EAR
        self.BLINK_CONSEC_FRAMES = 4  # reserved; unused by the current blink logic

        self.left_eye_state = False
        self.right_eye_state = False
        self.left_blink_counter = 0
        self.right_blink_counter = 0

        self.smartphone_detected = False
        self.smartphone_detection_frame_interval = 30
        self.frame_count = 0

        # Student metadata, expected to be set by the calling application
        self.student_id = None
        self.student_name = None

    def calculate_ear(self, eye):
        # Eye Aspect Ratio: vertical landmark distances over the horizontal one.
        A = np.linalg.norm(eye[1] - eye[5])  # vertical distance p2-p6
        B = np.linalg.norm(eye[2] - eye[4])  # vertical distance p3-p5
        C = np.linalg.norm(eye[0] - eye[3])  # horizontal distance p1-p4
        return (A + B) / (2.0 * C)
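
    # Worked example for calculate_ear() above: with vertical distances
    # A = B = 6 px and horizontal width C = 24 px, EAR = (6 + 6) / (2 * 24)
    # = 0.25, exactly EAR_THRESHOLD; smaller values count as a closed eye.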

    def analyze_texture(self, face_region):
        # LBP texture cue against printed-photo attacks. With method="uniform"
        # and P=8 there are only P + 2 = 10 distinct codes, so the histogram
        # needs exactly 10 bins.
        gray_face = cv2.cvtColor(face_region, cv2.COLOR_BGR2GRAY)
        lbp = feature.local_binary_pattern(gray_face, P=8, R=1, method="uniform")
        lbp_hist, _ = np.histogram(lbp.ravel(), bins=np.arange(0, 11))
        lbp_hist = lbp_hist.astype("float")
        lbp_hist /= (lbp_hist.sum() + 1e-5)
        # Heuristic: treat the face as live when the low-transition uniform
        # patterns carry enough of the mass; the 0.3 cutoff is untuned and
        # should be validated against real and spoofed samples.
        return np.sum(lbp_hist[:5]) > 0.3

    def detect_hand_gesture(self, frame):
        results = self.hands.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        return results.multi_hand_landmarks is not None
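
    # Note on detect_hand_gesture(): MediaPipe expects RGB input (hence the
    # BGR->RGB conversion) and returns multi_hand_landmarks as None when no
    # hand is visible, so the check above is simply "is any hand present".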

    def detect_smartphone(self, frame):
        # Run the (expensive) YOLO pass only every Nth frame; the result is
        # kept until the next pass so a detection persists for the interval.
        if self.frame_count % self.smartphone_detection_frame_interval == 0:
            self.smartphone_detected = False
            blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
            self.net_smartphone.setInput(blob)
            output_layers_names = self.net_smartphone.getUnconnectedOutLayersNames()
            detections = self.net_smartphone.forward(output_layers_names)

            # Each YOLO row is [cx, cy, w, h, objectness, class scores...],
            # with box coordinates normalised to the frame size.
            for detection in detections:
                for obj in detection:
                    scores = obj[5:]
                    class_id = np.argmax(scores)
                    confidence = scores[class_id]
                    if confidence > 0.5 and self.classes_smartphone[class_id] == 'cell phone':
                        center_x = int(obj[0] * frame.shape[1])
                        center_y = int(obj[1] * frame.shape[0])
                        width = int(obj[2] * frame.shape[1])
                        height = int(obj[3] * frame.shape[0])
                        left = int(center_x - width / 2)
                        top = int(center_y - height / 2)

                        cv2.rectangle(frame, (left, top), (left + width, top + height), (0, 0, 255), 2)
                        cv2.putText(frame, 'Smartphone Detected', (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

                        self.smartphone_detected = True
                        self.left_blink_counter = 0
                        self.right_blink_counter = 0
                        self.frame_count += 1
                        return

        # Count every frame so the modulo schedule above stays on cadence.
        self.frame_count += 1

    def access_verified_image(self):
        ret, frame = self.cap.read()
        if not ret:
            return None

        # Perform anti-spoofing checks
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.detector(gray)
        
        # Check if a face is detected
        if len(faces) == 0:
            return None
        
        # Assume the first detected face is the subject
        face = faces[0]
        landmarks = self.predictor(gray, face)
        
        # Require a completed blink (both eyes closed and reopened)
        leftEye = np.array([(landmarks.part(n).x, landmarks.part(n).y) for n in range(36, 42)])
        rightEye = np.array([(landmarks.part(n).x, landmarks.part(n).y) for n in range(42, 48)])
        ear_left = self.calculate_ear(leftEye)
        ear_right = self.calculate_ear(rightEye)
        if not self.detect_blink(ear_left, ear_right):
            return None

        # Require a visible hand as a liveness gesture
        if not self.detect_hand_gesture(frame):
            return None

        # Check if a smartphone is detected
        self.detect_smartphone(frame)
        if self.smartphone_detected:
            return None
        
        # Run the LBP texture check on an expanded region around the face
        (x, y, w, h) = (face.left(), face.top(), face.width(), face.height())
        expanded_region = frame[max(y - h // 2, 0):min(y + 3 * h // 2, frame.shape[0]),
                                max(x - w // 2, 0):min(x + 3 * w // 2, frame.shape[1])]
        if not self.analyze_texture(expanded_region):
            return None

        return frame
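
    # Usage sketch for access_verified_image() above (hypothetical caller,
    # with system = AntiSpoofingSystem()):
    #   verified = system.access_verified_image()
    #   if verified is not None:
    #       system.save_image(verified)  # frame passed every liveness gate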

    def detect_blink(self, left_ear, right_ear):
        if self.smartphone_detected:
            self.left_eye_state = False
            self.right_eye_state = False
            self.left_blink_counter = 0
            self.right_blink_counter = 0
            return False

        if left_ear < self.EAR_THRESHOLD:
            if not self.left_eye_state:
                self.left_eye_state = True
        else:
            if self.left_eye_state:
                self.left_eye_state = False
                self.left_blink_counter += 1

        if right_ear < self.EAR_THRESHOLD:
            if not self.right_eye_state:
                self.right_eye_state = True
        else:
            if self.right_eye_state:
                self.right_eye_state = False
                self.right_blink_counter += 1

        if self.left_blink_counter > 0 and self.right_blink_counter > 0:
            self.left_blink_counter = 0
            self.right_blink_counter = 0
            return True
        else:
            return False
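
    # detect_blink() reports True only after BOTH eyes have closed and
    # reopened at least once, so a static photo, whose EAR never changes,
    # can never trigger a capture.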

    def run(self):
        ret, frame = self.cap.read()
        if not ret:
            return None

        # Detect smartphone in the frame
        self.detect_smartphone(frame)

        if self.smartphone_detected:
            cv2.putText(frame, "Mobile phone detected, can't record attendance", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        else:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = self.detector(gray)

            for face in faces:
                landmarks = self.predictor(gray, face)
                leftEye = np.array([(landmarks.part(n).x, landmarks.part(n).y) for n in range(36, 42)])
                rightEye = np.array([(landmarks.part(n).x, landmarks.part(n).y) for n in range(42, 48)])

                ear_left = self.calculate_ear(leftEye)
                ear_right = self.calculate_ear(rightEye)

                if self.detect_blink(ear_left, ear_right):
                    self.blink_count += 1
                    cv2.putText(frame, f"Blink Count: {self.blink_count}", (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

                # Check if conditions for image capture are met
                if self.blink_count >= 5 and not self.image_captured:
                    # Capture the image and reset blink count
                    self.save_image(frame)
                    self.blink_count = 0
                    self.image_captured = True

        return frame

    def save_image(self, frame):
        # Save the frame with a timestamped filename in the save directory.
        timestamp = int(time.time())
        image_name = f"captured_{timestamp}.png"
        cv2.imwrite(os.path.join(self.save_directory, image_name), frame)
        self.captured_image = frame
        print(f"Image captured and saved as {image_name}")

    def get_captured_image(self):
        # Return the most recently captured frame, or None if nothing has
        # been captured yet.
        return self.captured_image

if __name__ == "__main__":
    anti_spoofing_system = AntiSpoofingSystem()
    # run() handles a single frame, so drive it in a loop and show the
    # annotated output until the camera fails or the user presses 'q'.
    while (frame := anti_spoofing_system.run()) is not None:
        cv2.imshow("Anti-Spoofing", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    anti_spoofing_system.cap.release()
    cv2.destroyAllWindows()