capradeepgujaran committed · Commit 9fd1d46 · Parent(s): fc9e0d8
Update app.py

app.py CHANGED
@@ -12,11 +12,11 @@ def create_monitor_interface():
     api_key = os.getenv("GROQ_API_KEY")
 
     class SafetyMonitor:
-        def __init__(self, model_name: str = "
+        def __init__(self, model_name: str = "mixtral-8x7b-vision"):
             self.client = Groq(api_key=api_key)
             self.model_name = model_name
-            self.max_image_size = (
-            self.jpeg_quality =
+            self.max_image_size = (128, 128)  # Drastically reduced size
+            self.jpeg_quality = 20  # Very low quality
 
         def resize_image(self, image):
             """Resize image while maintaining aspect ratio"""
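The new constants trade image fidelity for request size, presumably to keep the base64 payload inside the endpoint's limits. A quick self-contained way to sanity-check what those settings produce (a sketch, not part of the commit; the synthetic test frame and the rough four-characters-per-token heuristic are assumptions):

import base64, io
import numpy as np
from PIL import Image

# Encode a synthetic 128x128 RGB frame with the commit's settings and measure it.
img = Image.fromarray(np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8))
buf = io.BytesIO()
img.save(buf, format="JPEG", quality=20, optimize=True)
b64_len = len(base64.b64encode(buf.getvalue()))  # base64 adds ~33% overhead
print(f"JPEG bytes: {buf.tell()}, base64 chars: {b64_len}, ~{b64_len // 4} tokens")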
@@ -32,37 +32,33 @@ def create_monitor_interface():
             new_height = min(self.max_image_size[1], height)
             new_width = int(new_height * aspect)
 
-
+            resized = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_AREA)
+            return resized
 
         def analyze_frame(self, frame: np.ndarray) -> str:
             if frame is None:
                 return "No frame received"
 
-            # Convert
+            # Convert and resize image
             if len(frame.shape) == 2:
                 frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
             elif len(frame.shape) == 3 and frame.shape[2] == 4:
                 frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB)
 
-            # Resize image
             frame = self.resize_image(frame)
-
             frame_pil = PILImage.fromarray(frame)
 
-            # Convert to base64 with
+            # Convert to base64 with minimal size
             buffered = io.BytesIO()
-            frame_pil.save(buffered,
+            frame_pil.save(buffered,
+                           format="JPEG",
+                           quality=self.jpeg_quality,
+                           optimize=True)
             img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
 
             try:
-
-
-                2. Unsafe behaviors or positions
-                3. Equipment and machinery safety
-                4. Environmental hazards
-                Provide specific observations.
-
-                <image>data:image/jpeg;base64,{img_base64}</image>"""
+                # Minimal prompt
+                prompt = f"""List safety issues: <image>data:image/jpeg;base64,{img_base64}</image>"""
 
                 completion = self.client.chat.completions.create(
                     messages=[
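Only the portrait branch of resize_image appears in the context lines above. For reference, a self-contained sketch of the whole helper as the surrounding code implies it (the landscape branch and the standalone signature are inferred, not shown in the diff):

import cv2
import numpy as np

def resize_keep_aspect(image: np.ndarray, max_size=(128, 128)) -> np.ndarray:
    """Shrink an image to fit within max_size while keeping its aspect ratio."""
    height, width = image.shape[:2]
    aspect = width / height
    if aspect >= 1:  # landscape or square: width is the binding dimension
        new_width = min(max_size[0], width)
        new_height = int(new_width / aspect)
    else:  # portrait: mirrors the branch visible in the hunk
        new_height = min(max_size[1], height)
        new_width = int(new_height * aspect)
    # INTER_AREA is the usual interpolation choice when downscaling
    return cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_AREA)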
@@ -72,12 +68,12 @@ def create_monitor_interface():
                         }
                     ],
                     model=self.model_name,
-                    max_tokens=
-                    temperature=0.
+                    max_tokens=100,
+                    temperature=0.1
                 )
                 return completion.choices[0].message.content
             except Exception as e:
-                print(f"Detailed error: {str(e)}")
+                print(f"Detailed error: {str(e)}")
                 return f"Analysis Error: {str(e)}"
 
         def process_frame(self, frame: np.ndarray) -> tuple[np.ndarray, str]:
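One caveat on the request itself: splicing the data URI into the prompt text as an ad-hoc <image> tag only works if the backend parses that convention. OpenAI-compatible vision endpoints, which Groq's chat completions API follows, generally expect the image as a structured content part instead. A hedged sketch of that shape, reusing img_base64 from the method above (whether the mixtral-8x7b-vision default is actually served as a vision model is not verified here):

completion = self.client.chat.completions.create(
    model=self.model_name,
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "List safety issues:"},
            # Image travels as a typed content part, not inside the prompt string
            {"type": "image_url",
             "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"}},
        ],
    }],
    max_tokens=100,
    temperature=0.1,
)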
@@ -109,16 +105,13 @@ def create_monitor_interface():
     monitor = SafetyMonitor()
 
     with gr.Blocks() as demo:
-        gr.Markdown("""
-        # Safety Monitoring System
-        Upload an image to analyze workplace safety concerns.
-        """)
+        gr.Markdown("# Safety Analysis System")
 
         with gr.Row():
             input_image = gr.Image(label="Upload Image")
-            output_image = gr.Image(label="
+            output_image = gr.Image(label="Results")
 
-        analysis_text = gr.Textbox(label="
+        analysis_text = gr.Textbox(label="Analysis", lines=5)
 
         def analyze_image(image):
             if image is None:
@@ -127,7 +120,7 @@ def create_monitor_interface():
                 processed_frame, analysis = monitor.process_frame(image)
                 return processed_frame, analysis
             except Exception as e:
-                print(f"Processing error: {str(e)}")
+                print(f"Processing error: {str(e)}")
                 return None, f"Error processing image: {str(e)}"
 
         input_image.change(
@@ -135,13 +128,6 @@ def create_monitor_interface():
             inputs=input_image,
             outputs=[output_image, analysis_text]
         )
-
-        gr.Markdown("""
-        ## Instructions:
-        1. Upload an image using the input panel
-        2. The system will automatically analyze it for safety concerns
-        3. View the analyzed image with overlay and detailed analysis below
-        """)
 
     return demo
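For reference, the UI and event wiring touched by the last three hunks reduce to a small standalone skeleton (the handler body is stubbed here; fn=analyze_image is implied by the inputs/outputs shown, though the fn= line itself falls outside the hunk):

import gradio as gr

def analyze_image(image):
    # Stub standing in for monitor.process_frame(image)
    if image is None:
        return None, "No image provided"
    return image, "analysis placeholder"

with gr.Blocks() as demo:
    gr.Markdown("# Safety Analysis System")
    with gr.Row():
        input_image = gr.Image(label="Upload Image")
        output_image = gr.Image(label="Results")
    analysis_text = gr.Textbox(label="Analysis", lines=5)
    input_image.change(fn=analyze_image, inputs=input_image,
                       outputs=[output_image, analysis_text])

demo.launch()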