File size: 3,310 Bytes
3d3f535
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
import os
# Must be set BEFORE cv2 is imported below: per the variable name this makes
# OpenCV skip the macOS AVFoundation camera-authorization prompt — TODO confirm
# against the OpenCV AVFoundation backend docs.
os.environ['OPENCV_AVFOUNDATION_SKIP_AUTH'] = '1'

import streamlit as st
import cv2
from transformers import pipeline
from PIL import Image

# Initialize the Hugging Face pipeline for facial emotion detection.
# NOTE(review): this runs at module import time on every Streamlit rerun and
# downloads the model on first use; consider st.cache_resource — verify.
emotion_pipeline = pipeline("image-classification", model="dima806/facial_emotions_image_detection")

# Function to analyze sentiment
def analyze_sentiment(frame):
    """Classify the dominant facial emotion in a single video frame.

    Args:
        frame: BGR image array as produced by ``cv2.VideoCapture.read()``.

    Returns:
        str: the ``'label'`` of the highest-scoring classification returned
        by the Hugging Face ``emotion_pipeline``.
    """
    # OpenCV delivers BGR; the model expects RGB, so convert first.
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # The pipeline accepts PIL images, not raw numpy arrays.
    pil_image = Image.fromarray(rgb_frame)
    # Run inference exactly once per frame. (Bug fix: the original called the
    # pipeline twice in a row — a copy-paste duplicate — doubling inference
    # cost per frame for an identical result.)
    results = emotion_pipeline(pil_image)
    # Each result is a dict with 'label' and 'score'; keep the top score.
    dominant_emotion = max(results, key=lambda x: x['score'])['label']
    return dominant_emotion

# Function to capture video from webcam
def video_stream():
    """Yield BGR frames from the default webcam (device 0).

    Yields frames until a read fails or the consumer stops iterating.
    Displays a Streamlit error and returns immediately if the device
    cannot be opened.

    Bug fix: the capture device is now released in a ``finally`` block, so
    it is freed even when the generator is closed early (GeneratorExit)
    instead of being exhausted — the original only released it after the
    ``while`` loop, leaking the camera handle on early abandonment.
    """
    video_capture = cv2.VideoCapture(0)
    if not video_capture.isOpened():
        st.error("Error: Could not open video capture device.")
        return

    try:
        while True:
            ret, frame = video_capture.read()
            if not ret:
                st.error("Error: Failed to read frame from video capture device.")
                break
            yield frame
    finally:
        # Runs on break AND on generator close/garbage collection.
        video_capture.release()

# Streamlit UI
# Inject custom CSS for the page: white background, red (#E60012) headings
# and buttons, Arial-family fonts. unsafe_allow_html is required for raw
# <style> markup to be rendered instead of escaped.
st.markdown(
    """
    <style>
        .main {
            background-color: #FFFFFF;
        }
        .reportview-container .main .block-container{
            padding-top: 2rem;
        }
        h1 {
            color: #E60012;
            font-family: 'Arial Black', Gadget, sans-serif;
        }
        h2 {
            color: #E60012;
            font-family: 'Arial', sans-serif;
        }
        h3 {
            color: #333333;
            font-family: 'Arial', sans-serif;
        }
        .stButton button {
            background-color: #E60012;
            color: white;
            border-radius: 5px;
            font-size: 16px;
        }
    </style>
    """,
    unsafe_allow_html=True
)

# Page header.
st.title("Computer Vision Test Lab")
st.subheader("Facial Sentiment")

# Columns for input and output streams
col1, col2 = st.columns(2)

with col1:
    st.header("Input Stream")
    st.subheader("Webcam")
    # Placeholder re-rendered with the raw webcam frame on every loop pass.
    video_placeholder = st.empty()

with col2:
    st.header("Output Stream")
    st.subheader("Analysis")
    # Placeholder for the analyzed frame (currently the same image as input).
    output_placeholder = st.empty()

# Placeholder below the columns, updated each frame with the detected emotion.
sentiment_placeholder = st.empty()

# Start video stream: read webcam frames, display them in both placeholders,
# and run emotion analysis on each frame until a read fails.
video_capture = cv2.VideoCapture(0)
if not video_capture.isOpened():
    st.error("Error: Could not open video capture device.")
else:
    try:
        while True:
            ret, frame = video_capture.read()
            if not ret:
                st.error("Error: Failed to read frame from video capture device.")
                break

            # Display the input stream (frame is BGR; Streamlit converts it).
            video_placeholder.image(frame, channels="BGR")

            # Classify the dominant emotion in the current frame.
            sentiment = analyze_sentiment(frame)

            # Display the output stream (same as input for now, modify as needed).
            output_placeholder.image(frame, channels="BGR")

            # Display sentiment
            sentiment_placeholder.write(f"Sentiment: {sentiment}")

            # NOTE(review): cv2.waitKey only sees key presses on an OpenCV
            # window, and none is created here, so the 'q'-to-quit check can
            # never fire in this Streamlit app. Kept for behavior parity; it
            # still provides the ~1 ms inter-frame delay.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Bug fix: the original never released the capture device, leaking
        # the camera handle when the loop ended or the script was stopped.
        video_capture.release()