Geoffrey Hollingworth committed on
Commit 3d3f535
1 Parent(s): 72fcc88

initial upload

Files changed (7):
  1. .gitignore +118 -0
  2. README.md +8 -12
  3. app.py +146 -0
  4. app.py.safe +155 -0
  5. app.py.sentiment-one +117 -0
  6. requirements.txt +7 -0
  7. run_streamlist.sh +5 -0
.gitignore ADDED
@@ -0,0 +1,118 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ pip-wheel-metadata/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ .hypothesis/
+ .pytest_cache/
+ .coverage
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or tool, you might want to ignore these files since the code is intended to run in multiple environments;
+ # otherwise, check in the pyenv configuration files, especially if you are in an isolated environment.
+ .pyenv
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # not cross-compatible, pipenv may install dependencies that are not in line with the rest of the team.
+ Pipfile.lock
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
+ __pypackages__/
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # Pycharm
+ .idea/
+
+ # VS Code
+ .vscode/
+
+ # Streamlit static files
+ .streamlit/
+
+ # Local environment variables
+ .env
+
+ # Deepface models
+ deepface_weights/
+
+ # MacOS specific
+ .DS_Store
README.md CHANGED
@@ -1,13 +1,9 @@
- ---
- title: Sentiment Analyzer
- emoji: 🦀
- colorFrom: indigo
- colorTo: blue
- sdk: streamlit
- sdk_version: 1.35.0
- app_file: app.py
- pinned: false
- license: mit
- ---
+ # Facial Sentiment Analysis with Streamlit

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ This Streamlit application streams video from the webcam, analyzes facial sentiment, and displays the results in real time.
+
+ ## How to Use
+
+ 1. Clone the repository.
+ 2. Install the required packages: `pip install -r requirements.txt`
+ 3. Run the application: `streamlit run app.py`
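
A quick way to sanity-check the emotion model outside Streamlit is to run the same Hugging Face pipeline on a single image; a minimal sketch, where `face.jpg` is a placeholder filename for any cropped face photo:

```python
# Classify the facial expression in one image with the model app.py uses.
from transformers import pipeline
from PIL import Image

emotion_pipeline = pipeline("image-classification", model="trpakov/vit-face-expression")
results = emotion_pipeline(Image.open("face.jpg"))  # placeholder input file
print(max(results, key=lambda r: r["score"]))       # e.g. {'label': 'happy', 'score': ...}
```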
app.py ADDED
@@ -0,0 +1,146 @@
+ import os
+ os.environ['OPENCV_AVFOUNDATION_SKIP_AUTH'] = '1'
+
+ import streamlit as st
+ import cv2
+ import numpy as np
+ from transformers import pipeline
+ from PIL import Image
+ from mtcnn import MTCNN
+
+ # Initialize the Hugging Face pipeline for facial emotion detection
+ emotion_pipeline = pipeline("image-classification", model="trpakov/vit-face-expression")
+
+ # Initialize MTCNN for face detection
+ mtcnn = MTCNN()
+
+ # Function to analyze sentiment
+ def analyze_sentiment(face):
+     # Convert face to RGB
+     rgb_face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
+     # Convert the face to a PIL image
+     pil_image = Image.fromarray(rgb_face)
+     # Analyze sentiment using the Hugging Face pipeline
+     results = emotion_pipeline(pil_image)
+     # Get the dominant emotion
+     dominant_emotion = max(results, key=lambda x: x['score'])['label']
+     return dominant_emotion
+
+ TEXT_SIZE = 3
+
+ # Function to detect faces, analyze sentiment, and draw a red box around them
+ def detect_and_draw_faces(frame):
+     # Detect faces using MTCNN (it expects RGB input, while OpenCV frames are BGR)
+     results = mtcnn.detect_faces(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+
+     # Draw on the frame
+     for result in results:
+         x, y, w, h = result['box']
+         face = frame[max(y, 0):y+h, max(x, 0):x+w]  # clamp: MTCNN can return negative coordinates
+         sentiment = analyze_sentiment(face)
+         cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 10)  # Thicker red box
+
+         # Calculate position for the text background and the text itself
+         text_size = cv2.getTextSize(sentiment, cv2.FONT_HERSHEY_SIMPLEX, TEXT_SIZE, 2)[0]
+         text_x = x
+         text_y = y - 10
+         background_tl = (text_x, text_y - text_size[1])
+         background_br = (text_x + text_size[0], text_y + 5)
+
+         # Draw black rectangle as background
+         cv2.rectangle(frame, background_tl, background_br, (0, 0, 0), cv2.FILLED)
+         # Draw white text on top
+         cv2.putText(frame, sentiment, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, TEXT_SIZE, (255, 255, 255), 2)
+
+     return frame
+
+ # Function to capture video from webcam (a generator; currently unused by the main loop below)
+ def video_stream():
+     video_capture = cv2.VideoCapture(0)
+     if not video_capture.isOpened():
+         st.error("Error: Could not open video capture device.")
+         return
+
+     while True:
+         ret, frame = video_capture.read()
+         if not ret:
+             st.error("Error: Failed to read frame from video capture device.")
+             break
+         yield frame
+
+     video_capture.release()
+
+ # Streamlit UI
+ st.markdown(
+     """
+     <style>
+     .main {
+         background-color: #FFFFFF;
+     }
+     .reportview-container .main .block-container {
+         padding-top: 2rem;
+     }
+     h1 {
+         color: #E60012;
+         font-family: 'Arial Black', Gadget, sans-serif;
+     }
+     h2 {
+         color: #E60012;
+         font-family: 'Arial', sans-serif;
+     }
+     h3 {
+         color: #333333;
+         font-family: 'Arial', sans-serif;
+     }
+     .stButton button {
+         background-color: #E60012;
+         color: white;
+         border-radius: 5px;
+         font-size: 16px;
+     }
+     </style>
+     """,
+     unsafe_allow_html=True
+ )
+
+ st.title("Computer Vision Test Lab")
+ st.subheader("Facial Sentiment")
+
+ # Columns for input and output streams
+ col1, col2 = st.columns(2)
+
+ with col1:
+     st.header("Input Stream")
+     st.subheader("Webcam")
+     video_placeholder = st.empty()
+
+ with col2:
+     st.header("Output Stream")
+     st.subheader("Analysis")
+     output_placeholder = st.empty()
+
+ sentiment_placeholder = st.empty()
+
+ # Start video stream
+ video_capture = cv2.VideoCapture(0)
+ if not video_capture.isOpened():
+     st.error("Error: Could not open video capture device.")
+ else:
+     while True:
+         ret, frame = video_capture.read()
+         if not ret:
+             st.error("Error: Failed to read frame from video capture device.")
+             break
+
+         # Display the input stream before any boxes are drawn on the frame
+         video_placeholder.image(frame, channels="BGR")
+
+         # Detect faces, analyze sentiment, and draw red boxes with sentiment labels
+         frame_with_boxes = detect_and_draw_faces(frame)
+
+         # Display the annotated output stream
+         output_placeholder.image(frame_with_boxes, channels="BGR")
+
+         # Add a short delay to control the frame rate
+         if cv2.waitKey(1) & 0xFF == ord('q'):
+             break
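
The detect-then-classify flow in `detect_and_draw_faces` can be exercised offline, without a webcam; a minimal sketch, assuming a hypothetical `group_photo.jpg` in the working directory:

```python
# Run MTCNN face detection, then per-face emotion classification, on a still image.
import cv2
from mtcnn import MTCNN
from transformers import pipeline
from PIL import Image

detector = MTCNN()
classifier = pipeline("image-classification", model="trpakov/vit-face-expression")

frame = cv2.imread("group_photo.jpg")         # hypothetical file; OpenCV loads BGR
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # MTCNN expects RGB input
for det in detector.detect_faces(rgb):
    x, y, w, h = det["box"]
    x, y = max(x, 0), max(y, 0)               # boxes can extend past the frame edge
    face = Image.fromarray(rgb[y:y+h, x:x+w])
    top = max(classifier(face), key=lambda r: r["score"])
    print(det["box"], top["label"], round(top["score"], 3))
```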
app.py.safe ADDED
@@ -0,0 +1,155 @@
+ import os
+ os.environ['OPENCV_AVFOUNDATION_SKIP_AUTH'] = '1'
+
+ import streamlit as st
+ import cv2
+ import numpy as np
+ from transformers import pipeline
+ from PIL import Image, ImageDraw
+
+ # Initialize the Hugging Face pipeline for facial emotion detection using the "trpakov/vit-face-expression" model
+ emotion_pipeline = pipeline("image-classification", model="trpakov/vit-face-expression")
+
+ # Function to analyze sentiment
+ def analyze_sentiment(face):
+     # Convert face to RGB
+     rgb_face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
+     # Convert the face to a PIL image
+     pil_image = Image.fromarray(rgb_face)
+     # Analyze sentiment using the Hugging Face pipeline
+     results = emotion_pipeline(pil_image)
+     # Get the dominant emotion
+     dominant_emotion = max(results, key=lambda x: x['score'])['label']
+     return dominant_emotion
+
+ TEXT_SIZE = 3
+
+ # Function to detect faces, analyze sentiment, and draw a red box around them
+ def detect_and_draw_faces(frame):
+     # Convert frame to RGB
+     rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+     # Convert the frame to a PIL image
+     pil_image = Image.fromarray(rgb_frame)
+     # Analyze sentiment using the Hugging Face pipeline
+     results = emotion_pipeline(pil_image)
+
+     # Print the results to understand the structure
+     print(results)
+
+     # Draw on the PIL image
+     draw = ImageDraw.Draw(pil_image)
+
+     # Iterate through results (NOTE: image-classification output carries only 'label'/'score', no 'box'; this loop needs a separate face detector to work)
+     for result in results:
+         box = result['box']
+         sentiment = result['label']
+
+         # Draw rectangle and text
+         x, y, w, h = box['left'], box['top'], box['width'], box['height']
+         draw.rectangle(((x, y), (x+w, y+h)), outline="red", width=3)
+
+         # Calculate position for the text background and the text itself
+         text_size = draw.textbbox((0, 0), sentiment)[2:]  # (width, height); textsize() was removed in Pillow 10
+         background_tl = (x, y - text_size[1] - 5)
+         background_br = (x + text_size[0], y)
+
+         # Draw black rectangle as background
+         draw.rectangle([background_tl, background_br], fill="black")
+         # Draw white text on top
+         draw.text((x, y - text_size[1]), sentiment, fill="white")
+
+     # Convert back to OpenCV format
+     frame_with_boxes = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
+
+     return frame_with_boxes
+
+ # Function to capture video from webcam (a generator; currently unused by the main loop below)
+ def video_stream():
+     video_capture = cv2.VideoCapture(0)
+     if not video_capture.isOpened():
+         st.error("Error: Could not open video capture device.")
+         return
+
+     while True:
+         ret, frame = video_capture.read()
+         if not ret:
+             st.error("Error: Failed to read frame from video capture device.")
+             break
+         yield frame
+
+     video_capture.release()
+
+ # Streamlit UI
+ st.markdown(
+     """
+     <style>
+     .main {
+         background-color: #FFFFFF;
+     }
+     .reportview-container .main .block-container {
+         padding-top: 2rem;
+     }
+     h1 {
+         color: #E60012;
+         font-family: 'Arial Black', Gadget, sans-serif;
+     }
+     h2 {
+         color: #E60012;
+         font-family: 'Arial', sans-serif;
+     }
+     h3 {
+         color: #333333;
+         font-family: 'Arial', sans-serif;
+     }
+     .stButton button {
+         background-color: #E60012;
+         color: white;
+         border-radius: 5px;
+         font-size: 16px;
+     }
+     </style>
+     """,
+     unsafe_allow_html=True
+ )
+
+ st.title("Computer Vision Test Lab")
+ st.subheader("Facial Sentiment")
+
+ # Columns for input and output streams
+ col1, col2 = st.columns(2)
+
+ with col1:
+     st.header("Input Stream")
+     st.subheader("Webcam")
+     video_placeholder = st.empty()
+
+ with col2:
+     st.header("Output Stream")
+     st.subheader("Analysis")
+     output_placeholder = st.empty()
+
+ sentiment_placeholder = st.empty()
+
+ # Start video stream
+ video_capture = cv2.VideoCapture(0)
+ if not video_capture.isOpened():
+     st.error("Error: Could not open video capture device.")
+ else:
+     while True:
+         ret, frame = video_capture.read()
+         if not ret:
+             st.error("Error: Failed to read frame from video capture device.")
+             break
+
+         # Detect faces, analyze sentiment, and draw red boxes with sentiment labels
+         frame_with_boxes = detect_and_draw_faces(frame)
+
+         # Display the input stream with the red box around the face
+         video_placeholder.image(frame_with_boxes, channels="BGR")
+
+         # Display the output stream (here it's the same as the input; modify as needed)
+         output_placeholder.image(frame_with_boxes, channels="BGR")
+
+         # Add a short delay to control the frame rate
+         if cv2.waitKey(1) & 0xFF == ord('q'):
+             break
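
Note on `app.py.safe`: an image-classification pipeline returns a list of `{'label', 'score'}` dicts for the whole image, with no bounding boxes, so the `result['box']` loop above cannot run as written. Printing the raw output makes the shape obvious; a sketch, with `face.jpg` as a hypothetical input:

```python
from transformers import pipeline
from PIL import Image

clf = pipeline("image-classification", model="trpakov/vit-face-expression")
print(clf(Image.open("face.jpg")))
# -> [{'label': ..., 'score': ...}, ...]  (one entry per emotion class, no 'box' key)
```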
app.py.sentiment-one ADDED
@@ -0,0 +1,117 @@
+ import os
+ os.environ['OPENCV_AVFOUNDATION_SKIP_AUTH'] = '1'
+
+ import streamlit as st
+ import cv2
+ from transformers import pipeline
+ from PIL import Image
+
+ # Initialize the Hugging Face pipeline for facial emotion detection
+ emotion_pipeline = pipeline("image-classification", model="dima806/facial_emotions_image_detection")
+
+ # Function to analyze sentiment
+ def analyze_sentiment(frame):
+     # Convert frame to RGB
+     rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+     # Convert the frame to a PIL image
+     pil_image = Image.fromarray(rgb_frame)
+     # Analyze sentiment using the Hugging Face pipeline
+     results = emotion_pipeline(pil_image)
+     # Get the dominant emotion
+     dominant_emotion = max(results, key=lambda x: x['score'])['label']
+     return dominant_emotion
+
+ # Function to capture video from webcam (a generator; currently unused by the main loop below)
+ def video_stream():
+     video_capture = cv2.VideoCapture(0)
+     if not video_capture.isOpened():
+         st.error("Error: Could not open video capture device.")
+         return
+
+     while True:
+         ret, frame = video_capture.read()
+         if not ret:
+             st.error("Error: Failed to read frame from video capture device.")
+             break
+         yield frame
+
+     video_capture.release()
+
+ # Streamlit UI
+ st.markdown(
+     """
+     <style>
+     .main {
+         background-color: #FFFFFF;
+     }
+     .reportview-container .main .block-container {
+         padding-top: 2rem;
+     }
+     h1 {
+         color: #E60012;
+         font-family: 'Arial Black', Gadget, sans-serif;
+     }
+     h2 {
+         color: #E60012;
+         font-family: 'Arial', sans-serif;
+     }
+     h3 {
+         color: #333333;
+         font-family: 'Arial', sans-serif;
+     }
+     .stButton button {
+         background-color: #E60012;
+         color: white;
+         border-radius: 5px;
+         font-size: 16px;
+     }
+     </style>
+     """,
+     unsafe_allow_html=True
+ )
+
+ st.title("Computer Vision Test Lab")
+ st.subheader("Facial Sentiment")
+
+ # Columns for input and output streams
+ col1, col2 = st.columns(2)
+
+ with col1:
+     st.header("Input Stream")
+     st.subheader("Webcam")
+     video_placeholder = st.empty()
+
+ with col2:
+     st.header("Output Stream")
+     st.subheader("Analysis")
+     output_placeholder = st.empty()
+
+ sentiment_placeholder = st.empty()
+
+ # Start video stream
+ video_capture = cv2.VideoCapture(0)
+ if not video_capture.isOpened():
+     st.error("Error: Could not open video capture device.")
+ else:
+     while True:
+         ret, frame = video_capture.read()
+         if not ret:
+             st.error("Error: Failed to read frame from video capture device.")
+             break
+
+         # Display the input stream
+         video_placeholder.image(frame, channels="BGR")
+
+         # Analyze sentiment
+         sentiment = analyze_sentiment(frame)
+
+         # Display the output stream (here it's the same as the input; modify as needed)
+         output_placeholder.image(frame, channels="BGR")
+
+         # Display sentiment
+         sentiment_placeholder.write(f"Sentiment: {sentiment}")
+
+         # Add a short delay to control the frame rate
+         if cv2.waitKey(1) & 0xFF == ord('q'):
+             break
+
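
This variant classifies the whole frame rather than detected face crops, and it leaves the `video_stream()` generator unused. A sketch of how that generator could drive the same loop, reusing the placeholders defined in the file:

```python
# Hypothetical refactor: consume frames from the video_stream() generator
# instead of opening a second cv2.VideoCapture in the main script body.
for frame in video_stream():
    video_placeholder.image(frame, channels="BGR")
    sentiment = analyze_sentiment(frame)
    output_placeholder.image(frame, channels="BGR")
    sentiment_placeholder.write(f"Sentiment: {sentiment}")
```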
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ streamlit
+ opencv-python-headless
+ numpy
+ transformers
+ torch
+ mtcnn
+ Pillow
run_streamlist.sh ADDED
@@ -0,0 +1,5 @@
+ #!/bin/bash
+ # Set Chrome as the default browser for this session
+ export BROWSER="/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
+ # Run Streamlit with the provided arguments
+ streamlit run "$@"
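
Usage: make the script executable and pass the app path, e.g. `chmod +x run_streamlist.sh && ./run_streamlist.sh app.py`. Anything after the script name is forwarded to `streamlit run` via `"$@"`, so extra Streamlit flags pass through unchanged.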