Update app.py
app.py CHANGED
@@ -6,137 +6,114 @@ import time
 from PIL import Image
 import io
 import os
-from dotenv import load_dotenv

-        """
-        if frame is None:
-            return "No frame received"

-            completion = self.client.chat.completions.create(
-                messages=[
-                    {
-                        "role": "user",
-                        "content": [
-                            {"type": "text", "text": prompt},
-                            {"type": "image", "image": img_byte_arr}
-                        ]
-                    }
-                ],
-                model=self.model_name,
-                max_tokens=200,
-                temperature=0.2
-            )
-            return completion.choices[0].message.content
-        except Exception as e:
-            return f"Analysis Error: {str(e)}"

-        # Add analysis text
-        cv2.putText(display_frame, "Safety Analysis:", (10, 30),
-                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
-
-        # Split and display analysis text
-        y_position = 60
-        for line in analysis.split('\n'):
-            cv2.putText(display_frame, line[:80], (10, y_position),
-                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
-            y_position += 30

-    # Initialize the safety monitor
-    monitor = SafetyMonitor(model_name="mixtral-8x7b-vision")

-    with gr.
-        gr.

-            return None, "Webcam not started"
-        processed_frame, analysis = monitor.process_frame(frame)
-        return processed_frame, analysis
-
-        webcam.stream(
-            fn=analyze_stream,
-            outputs=[output_image, analysis_text],
-            show_progress="hidden"
-        )

-        print(f"Error: {e}")
-        print("Please make sure to set the GROQ_API_KEY environment variable")
-
-if __name__ == "__main__":
-    create_gradio_interface()
|
+
+def create_monitor_interface():
+    api_key = os.getenv("GROQ_API_KEY")
+
+    if not api_key:
+        with gr.Blocks() as demo:
+            gr.Markdown("""
+            # ⚠️ Groq API Key Required
+
+            ## Setup Instructions for Hugging Face Space:
+            1. Go to your Space's Settings tab
+            2. Scroll down to "Repository Secrets"
+            3. Click "New Secret"
+            4. Enter:
+               - Secret name: `GROQ_API_KEY`
+               - Secret value: Your Groq API key
+            5. Click "Add secret"
+            6. Rebuild the Space
+
+            Once configured, the safety monitoring system will be available.
+            """)
+        return demo
+
+    class SafetyMonitor:
+        def __init__(self, model_name: str = "mixtral-8x7b-vision"):
+            self.client = Groq(api_key=api_key)
+            self.model_name = model_name
+
+        def analyze_frame(self, frame: np.ndarray) -> str:
+            if frame is None:
+                return "No frame received"
+
+            frame_pil = Image.fromarray(frame)
+            img_byte_arr = io.BytesIO()
+            frame_pil.save(img_byte_arr, format='JPEG')
+            img_byte_arr = img_byte_arr.getvalue()
+
+            prompt = """Analyze this image for workplace safety issues. Focus on:
+            1. PPE usage (helmets, vests, etc.)
+            2. Unsafe behaviors
+            3. Equipment safety
+            4. Environmental hazards
+            Provide specific observations."""
+
+            try:
+                completion = self.client.chat.completions.create(
+                    messages=[
+                        {
+                            "role": "user",
+                            "content": [
+                                {"type": "text", "text": prompt},
+                                {"type": "image", "image": img_byte_arr}
+                            ]
+                        }
+                    ],
+                    model=self.model_name,
+                    max_tokens=200,
+                    temperature=0.2
+                )
+                return completion.choices[0].message.content
+            except Exception as e:
+                return f"Analysis Error: {str(e)}"
+
+        def process_frame(self, frame: np.ndarray) -> tuple[np.ndarray, str]:
+            if frame is None:
+                return None, "No frame received"
+
+            analysis = self.analyze_frame(frame)
+            display_frame = frame.copy()
+
+            # Add text overlay
+            overlay = display_frame.copy()
+            cv2.rectangle(overlay, (5, 5), (640, 200), (0, 0, 0), -1)
+            cv2.addWeighted(overlay, 0.3, display_frame, 0.7, 0, display_frame)
+
+            y_position = 30
+            for line in analysis.split('\n'):
+                cv2.putText(display_frame, line[:80], (10, y_position),
+                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
+                y_position += 30
+
+            return display_frame, analysis
+
+    # Create the main interface
+    monitor = SafetyMonitor()
+
+    with gr.Blocks() as demo:
+        gr.Markdown("# Real-time Safety Monitoring System")
+
+        with gr.Row():
+            webcam = gr.Image(source="webcam", streaming=True, label="Webcam Feed")
+            output_image = gr.Image(label="Analysis Feed")
+
+        analysis_text = gr.Textbox(label="Safety Analysis", lines=5)
+
+        def analyze_stream(frame):
+            if frame is None:
+                return None, "Webcam not started"
+            processed_frame, analysis = monitor.process_frame(frame)
+            return processed_frame, analysis
+
+        webcam.stream(
+            fn=analyze_stream,
+            outputs=[output_image, analysis_text],
+            show_progress=False
+        )
+
+    return demo
+
+demo = create_monitor_interface()
+demo.launch()
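
The new version drops the `python-dotenv` import and reads `GROQ_API_KEY` straight from the environment, matching the Repository Secrets steps shown in the fallback screen. For a local run outside the Space, a small launcher along the following lines would restore the old `.env` workflow; this is a sketch under that assumption, not part of the commit, and `run_local.py` is a hypothetical filename.

# run_local.py -- hedged sketch, not part of this commit.
# Loads GROQ_API_KEY from a local .env file (as the removed dotenv-based code did),
# then executes app.py, which builds the Blocks demo and calls demo.launch().
import runpy

from dotenv import load_dotenv

load_dotenv()  # expects a .env file next to app.py containing GROQ_API_KEY=<your key>
runpy.run_path("app.py", run_name="__main__")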
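
The request payload passes the raw JPEG bytes as `{"type": "image", "image": img_byte_arr}`. If the chat endpoint instead expects an OpenAI-style `image_url` part carrying a base64 data URL, the conversion would look roughly like the helper below; this is an assumption about an alternative payload format, not a statement of what the Groq SDK requires, and `frame_to_data_url` is a hypothetical name.

# Hedged alternative encoding, not part of this commit: turn an RGB frame into a
# base64 JPEG data URL suitable for an OpenAI-style "image_url" content part.
import base64
import io

import numpy as np
from PIL import Image

def frame_to_data_url(frame: np.ndarray) -> str:
    buf = io.BytesIO()
    Image.fromarray(frame).save(buf, format="JPEG")  # same JPEG step app.py already uses
    b64 = base64.b64encode(buf.getvalue()).decode("ascii")
    return f"data:image/jpeg;base64,{b64}"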