NTAMBARA Etienne committed
Commit • 13c62db
Parent(s): 9ba2015
Changes Made Keys p3
app.py
CHANGED
@@ -7,6 +7,7 @@ import pickle
 import firebase_admin
 from firebase_admin import credentials
 from firebase_admin import db
+from datetime import datetime
 from firebase_admin import storage
 
 # Initialize Firebase
@@ -31,52 +32,57 @@ def recognize_face(input_image):
     # Convert PIL Image to numpy array
     img = np.array(input_image)
     img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
+
     # Detect faces and encode
     face_locations = face_recognition.face_locations(img)
     face_encodings = face_recognition.face_encodings(img, face_locations)
+
     # Initialize the database reference
     ref = db.reference('Students')
-
-    # Recognize faces and fetch data from the database
-    results = []
-    for face_encoding in face_encodings:
+    recognized_faces_info = []
+
+    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
         matches = face_recognition.compare_faces(encodeListKnown, face_encoding)
         name = "Unknown"
-        student_info = {}
 
         face_distances = face_recognition.face_distance(encodeListKnown, face_encoding)
         best_match_index = np.argmin(face_distances)
         if matches[best_match_index]:
             student_id = studentsIds[best_match_index]
-            student_info = ref.child(student_id).get()
+            student_ref = ref.child(student_id)
+            student_info = student_ref.get()
 
         if student_info:
             name = student_info['name']
-            results.append(student_info)
+            # Increment total_attendance
+            student_info['total_attendance'] += 1
+            # Update last attendance time
+            student_info['last_attendance_time'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            # Write back to the database
+            student_ref.update(student_info)
+            recognized_faces_info.append(student_info)
         else:
-            results.append({'name': 'Unknown'})
+            recognized_faces_info.append({'name': 'Unknown'})
 
     # Draw rectangles around the faces
-    for (top, right, bottom, left), name in zip(face_locations, [student_info.get('name', 'Unknown') for student_info in results]):
-        cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)
-        cv2.putText(img, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
+        cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)
+        cv2.putText(img, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
 
     # Convert back to PIL Image
     pil_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
-    return pil_img, results
-# Define a function to handle webcam images
-def process_webcam_image(image):
-    # Convert the base64 image to a format that can be processed
-    # Process the image through the face recognition function
-    return recognize_face(image)
+    return pil_img, recognized_faces_info
+
 # Gradio interface
 iface = gr.Interface(
     fn=recognize_face,
-    inputs=gr.Image(type="pil"),
-    outputs=[gr.Image(type="pil"), gr.JSON(label="Student Information")],
-    title="Face Recognition Attendance System",
-    description="Upload an image to identify individuals."
+    inputs=gr.components.Image(source="webcam", type="pil", tool="editor"),
+    outputs=[
+        gr.components.Image(type="pil"),
+        gr.components.JSON(label="Student Information")
+    ],
+    title="Real-time Face Recognition Attendance System",
+    description="Activate your webcam and take a photo to check attendance."
 )
 
 if __name__ == "__main__":
-    iface.launch(debug=True,inline=False)
+    iface.launch()
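Two details of the reworked loop are worth flagging. First, the old `student_info = {}` initialization was removed while `if student_info:` was kept, so a detected face with no match in `encodeListKnown` reaches that check with `student_info` undefined (or carrying the previous face's record). Second, the attendance update is a read-modify-write (`.get()`, increment, `.update()`), which is not atomic and can drop a count when two check-ins land at the same time. A minimal sketch of one way to cover both points, assuming the same `Students/<id>` layout as above; the helper name `record_attendance` is invented here, and the counter uses the Admin SDK's `Reference.transaction()`:

from datetime import datetime
from firebase_admin import db

def record_attendance(student_id):
    # Hypothetical helper, not part of this commit.
    # Assumes firebase_admin.initialize_app() has already run, as in app.py.
    student_ref = db.reference('Students').child(student_id)
    student_info = student_ref.get() or {}  # {} for unknown IDs instead of an undefined variable
    if not student_info:
        return {'name': 'Unknown'}
    # transaction() re-runs the callable on conflict, so concurrent check-ins
    # cannot overwrite each other's increments.
    student_ref.child('total_attendance').transaction(lambda current: (current or 0) + 1)
    student_ref.update({'last_attendance_time': datetime.now().strftime("%Y-%m-%d %H:%M:%S")})
    return student_info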
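Note also that `inputs=gr.components.Image(source="webcam", type="pil", tool="editor")` relies on Gradio 3.x keyword arguments; in Gradio 4.x, `source` became `sources` (a list) and `tool` was removed, so the Space would fail at startup unless the Gradio version is pinned. A sketch of the 4.x equivalent, assuming the Space were upgraded (the `label` text is made up here):

import gradio as gr

# Gradio 4.x style: request webcam capture via sources=[...]; there is no tool= argument.
webcam_input = gr.Image(sources=["webcam"], type="pil", label="Webcam photo")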
app1.py
ADDED
@@ -0,0 +1,82 @@
+import gradio as gr
+import face_recognition
+import cv2
+import numpy as np
+from PIL import Image
+import pickle
+import firebase_admin
+from firebase_admin import credentials
+from firebase_admin import db
+from firebase_admin import storage
+
+# Initialize Firebase
+cred = credentials.Certificate("serviceAccountKey.json")  # Update with your credentials path
+firebase_app = firebase_admin.initialize_app(cred, {
+    'databaseURL': 'https://faceantendancerealtime-default-rtdb.firebaseio.com/',
+    'storageBucket': 'faceantendancerealtime.appspot.com'
+})
+bucket = storage.bucket()
+
+# Function to download face encodings from Firebase Storage
+def download_encodings():
+    blob = bucket.blob('EncodeFile.p')
+    blob.download_to_filename('EncodeFile.p')
+    with open('EncodeFile.p', 'rb') as file:
+        return pickle.load(file)
+
+encodeListKnownWithIds = download_encodings()
+encodeListKnown, studentsIds = encodeListKnownWithIds
+
+def recognize_face(input_image):
+    # Convert PIL Image to numpy array
+    img = np.array(input_image)
+    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
+    # Detect faces and encode
+    face_locations = face_recognition.face_locations(img)
+    face_encodings = face_recognition.face_encodings(img, face_locations)
+    # Initialize the database reference
+    ref = db.reference('Students')
+
+    # Recognize faces and fetch data from the database
+    results = []
+    for face_encoding in face_encodings:
+        matches = face_recognition.compare_faces(encodeListKnown, face_encoding)
+        name = "Unknown"
+        student_info = {}
+
+        face_distances = face_recognition.face_distance(encodeListKnown, face_encoding)
+        best_match_index = np.argmin(face_distances)
+        if matches[best_match_index]:
+            student_id = studentsIds[best_match_index]
+            student_info = ref.child(student_id).get()
+
+        if student_info:
+            name = student_info['name']
+            results.append(student_info)
+        else:
+            results.append({'name': 'Unknown'})
+
+    # Draw rectangles around the faces
+    for (top, right, bottom, left), name in zip(face_locations, [student_info.get('name', 'Unknown') for student_info in results]):
+        cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)
+        cv2.putText(img, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
+
+    # Convert back to PIL Image
+    pil_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
+    return pil_img, results
+# Define a function to handle webcam images
+def process_webcam_image(image):
+    # Convert the base64 image to a format that can be processed
+    # Process the image through the face recognition function
+    return recognize_face(image)
+# Gradio interface
+iface = gr.Interface(
+    fn=recognize_face,
+    inputs=gr.Image(type="pil"),
+    outputs=[gr.Image(type="pil"), gr.JSON(label="Student Information")],
+    title="Face Recognition Attendance System",
+    description="Upload an image to identify individuals."
+)
+
+if __name__ == "__main__":
+    iface.launch(debug=True,inline=False)
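Since `recognize_face` in app1.py returns an (annotated image, results) pair, it can be exercised without launching the Gradio UI. A minimal smoke-test sketch, assuming `serviceAccountKey.json` and the `EncodeFile.p` blob are reachable (importing the module runs the Firebase setup at the top of the file) and that a local `test_face.jpg` exists; the file names and the idea of importing app1 as a module are assumptions, not part of the commit:

from PIL import Image

import app1  # module-level code initializes Firebase and downloads EncodeFile.p

annotated, results = app1.recognize_face(Image.open("test_face.jpg"))
annotated.save("annotated.jpg")
print(results)  # student records for matched faces, {'name': 'Unknown'} entries otherwise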