"""Streamlit app: upload two images, verify with DeepFace whether they show
the same person, and optionally analyze age/gender/emotion/race per image."""

import streamlit as st
from PIL import Image
from deepface import DeepFace
import tempfile
import pandas as pd
import cv2 as cv
import threading
from time import sleep

st.title('Image Upload and Verification App')
st.write('Please upload two images for facial verification.')

# Upload two images
uploaded_file1 = st.file_uploader("Choose the first image...", type=["jpg", "png", "jpeg"], key="1")
uploaded_file2 = st.file_uploader("Choose the second image...", type=["jpg", "png", "jpeg"], key="2")

# Define the globals shared with the worker threads
df = None
analyze_img1 = None
analyze_img2 = None


def verify(img1_path, img2_path):
    """Run DeepFace verification and store the result in the global df."""
    global df
    model_name = 'VGG-Face'  # You can change this to other models like "Facenet", "OpenFace", "DeepFace", etc.
    result = DeepFace.verify(img1_path=img1_path, img2_path=img2_path, model_name=model_name)
    # Flatten the nested facial-area dict into two top-level columns
    result["img1_facial_areas"] = result["facial_areas"]["img1"]
    result["img2_facial_areas"] = result["facial_areas"]["img2"]
    del result["facial_areas"]
    df = pd.DataFrame([result])


def analyze_image1(img1_path):
    global analyze_img1
    analyze_img1 = DeepFace.analyze(img_path=img1_path)[0]


def analyze_image2(img2_path):
    global analyze_img2
    analyze_img2 = DeepFace.analyze(img_path=img2_path)[0]


def generate_analysis_sentence(analysis):
    age = analysis['age']
    gender = analysis['dominant_gender']  # use the dominant label, not an arbitrary dict key
    dominant_emotion = analysis['dominant_emotion']
    dominant_race = analysis['dominant_race']
    # Highlight specific words in blue (rendered via unsafe_allow_html below)
    age_html = f"<span style='color:blue'>{age}</span>"
    gender_html = f"<span style='color:blue'>{gender}</span>"
    dominant_emotion_html = f"<span style='color:blue'>{dominant_emotion}</span>"
    dominant_race_html = f"<span style='color:blue'>{dominant_race}</span>"
    return f"""The person in the image appears to be {age_html} years old, identified as '{gender_html}'.
The dominant emotion detected is {dominant_emotion_html}.
Ethnicity prediction indicates {dominant_race_html}."""


def display_image_with_analysis(image, analysis):
    """Show an image alongside its analysis sentence (currently unused helper)."""
    st.image(image, caption='Image', use_column_width=True)
    st.write("Analysis:")
    st.markdown(generate_analysis_sentence(analysis), unsafe_allow_html=True)


def draw_rectangle(img1_path, img2_path):
    """Draw the detected facial areas on both images and display the verdict."""
    # Load images with OpenCV
    img1 = cv.imread(img1_path)
    img2 = cv.imread(img2_path)

    # Get facial areas and draw rectangles
    face_area1 = df.iloc[0]["img1_facial_areas"]
    p1_1 = (face_area1["x"], face_area1["y"])
    p2_1 = (face_area1["x"] + face_area1["w"], face_area1["y"] + face_area1["h"])
    rect_img1 = cv.rectangle(img1.copy(), p1_1, p2_1, (0, 255, 0), 2)

    face_area2 = df.iloc[0]["img2_facial_areas"]
    p1_2 = (face_area2["x"], face_area2["y"])
    p2_2 = (face_area2["x"] + face_area2["w"], face_area2["y"] + face_area2["h"])
    rect_img2 = cv.rectangle(img2.copy(), p1_2, p2_2, (0, 255, 0), 2)

    # Convert BGR -> RGB for Streamlit and downscale with area interpolation
    rect_img1 = cv.cvtColor(rect_img1, cv.COLOR_BGR2RGB)
    rect_img1 = cv.resize(rect_img1, (200, 250), interpolation=cv.INTER_AREA)
    rect_img2 = cv.cvtColor(rect_img2, cv.COLOR_BGR2RGB)
    rect_img2 = cv.resize(rect_img2, (200, 250), interpolation=cv.INTER_AREA)

    # st.dataframe(df)  # uncomment to inspect the raw verification result

    # Display the results
    if df["verified"].iloc[0]:
        message = "The faces in the images match!"
    else:
        message = "The faces in the images do not match!"
    st.title(message)
    col1, col2 = st.columns(2)
    col1.image(rect_img1, caption='Verified Image 1', use_column_width=True)
    col2.image(rect_img2, caption='Verified Image 2', use_column_width=True)


def get_analyze():
    """Display the analysis results for both images."""
    st.write("Analysis for Image 1:")
    try:
        st.markdown(generate_analysis_sentence(analyze_img1), unsafe_allow_html=True)
    except Exception:
        st.warning("Could not detect a face in image 1.")
    st.write("Analysis for Image 2:")
    try:
        st.markdown(generate_analysis_sentence(analyze_img2), unsafe_allow_html=True)
    except Exception:
        st.warning("Could not detect a face in image 2.")


col1, col2 = st.columns(2)
with col1:
    st.text("Check if the faces in the images match!")
    check = st.button("Check")
with col2:
    st.text("Analyze the faces in each image!")
    analyze = st.button("Analyze")

if uploaded_file1 is not None and uploaded_file2 is not None:
    # Open the images with PIL
    image1 = Image.open(uploaded_file1)
    image2 = Image.open(uploaded_file2)
    st.write("Here are your images:")

    # Convert images to RGB if they are in RGBA mode (JPEG cannot store alpha)
    if image1.mode == 'RGBA':
        image1 = image1.convert('RGB')
    if image2.mode == 'RGBA':
        image2 = image2.convert('RGB')

    # Save the uploaded images to temporary files so DeepFace/OpenCV can read them
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tmp_file1:
        image1.save(tmp_file1.name)
        img1_path = tmp_file1.name
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tmp_file2:
        image2.save(tmp_file2.name)
        img2_path = tmp_file2.name

    # Run verification and both analyses concurrently
    t1 = threading.Thread(target=verify, args=(img1_path, img2_path))
    t2 = threading.Thread(target=analyze_image1, args=(img1_path,))
    t3 = threading.Thread(target=analyze_image2, args=(img2_path,))
    t1.start()
    t2.start()
    t3.start()
    t1.join()

    if check and not t1.is_alive():
        # Retry a few times, sleeping between attempts, before giving up
        n = 0
        while True:
            try:
                draw_rectangle(img1_path, img2_path)
                break
            except Exception:
                n += 1
                print(f"Try: {n}")
                sleep(2)
                if n == 4:
                    st.warning("Please make sure each photo contains a visible face, then try again.")
                    break

    t2.join()
    t3.join()

    if analyze:
        # Same retry pattern for the analysis results
        n = 0
        while True:
            try:
                get_analyze()
                break
            except Exception:
                n += 1
                print(f"Try: {n}")
                sleep(2)
                if n == 4:
                    st.warning("Please make sure each photo contains a visible face, then try again.")
                    break
else:
    st.write("Please upload both images to proceed.")
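# To launch the app locally (the filename "app.py" is an assumption, not fixed
# by this script; substitute whatever name the file is saved under):
#
#   streamlit run app.py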