Wang committed 291a269 (parent: f6ee317)

Update app.py
app.py CHANGED
@@ -1,10 +1,12 @@
+import matplotlib.pyplot as plt
 import numpy as np
 from six import BytesIO
 from PIL import Image
-
 import tensorflow as tf
 from object_detection.utils import label_map_util
 from object_detection.utils import visualization_utils as viz_utils
+from object_detection.utils import ops as utils_op
+import tarfile
 import wget
 import gradio as gr
 from huggingface_hub import snapshot_download
@@ -81,172 +83,16 @@ def predict2(image_np):
 
     return result_pil_img
 
-# import cv2
-
-# def predict_video(video_path):
-#     cap = cv2.VideoCapture(video_path)
-#     frame_width = int(cap.get(3))
-#     frame_height = int(cap.get(4))
-
-#     # Define the codec and create a video writer object
-#     out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width, frame_height))
-
-#     while cap.isOpened():
-#         ret, frame = cap.read()
-#         if not ret:
-#             break
-
-#         # Convert the frame to PIL image
-#         pil_image = Image.fromarray(frame)
-
-#         # Perform object detection on the frame
-#         result_pil_img = predict(pil_image)
-
-#         # Convert the result back to a NumPy array
-#         result_np_img = tf.keras.utils.img_to_array(result_pil_img)
-
-#         # Write the frame with detected objects to the video output
-#         out.write(result_np_img.astype('uint8'))
-
-#     # Release the video capture and writer objects
-#     cap.release()
-#     out.release()
-
-#     return "output.avi"
-
-
-
-
-
-#
-#
-
-# # predicted_img = predict(image_arr)
-# # predicted_img.save('predicted.jpg')
-
-# gr.Interface(fn=predict,
-#              inputs=gr.Image(type="pil"),
-#              outputs=gr.Image(type="pil")
-#              ).launch(share=True)
-
-# gr.Interface(
-#     fn=predict_video,
-#     inputs=gr.Video(type="file", label="Upload a video"),
-#     outputs=gr.Video(type="file", label="Download the processed video")
-# ).launch(share=True)
-
-# import numpy as np
-# from six import BytesIO
-# from PIL import Image
-# import tensorflow as tf
-# from object_detection.utils import label_map_util
-# from object_detection.utils import visualization_utils as viz_utils
-# from object_detection.utils import ops as utils_op
-# from huggingface_hub import snapshot_download
-# import gradio as gr
-# import cv2
-# import os
-
-# # Install TensorFlow within the Hugging Face environment
-# os.system('pip install tensorflow')
-
-# # Now you can import TensorFlow
-# import tensorflow as tf
-
-# PATH_TO_LABELS = 'data/label_map.pbtxt'
-# category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
-
-# def pil_image_as_numpy_array(pilimg):
-#     img_array = tf.keras.utils.img_to_array(pilimg)
-#     img_array = np.expand_dims(img_array, axis=0)
-#     return img_array
-
-# def load_image_into_numpy_array(path):
-#     image_data = tf.io.gfile.GFile(path, 'rb').read()
-#     image = Image.open(BytesIO(image_data))
-#     return pil_image_as_numpy_array(image)
-
-# def load_model():
-#     download_dir = snapshot_download(REPO_ID)
-#     saved_model_dir = os.path.join(download_dir, "saved_model")
-#     detection_model = tf.saved_model.load(saved_model_dir)
-#     return detection_model
-
-# def predict(pilimg):
-#     image_np = pil_image_as_numpy_array(pilimg)
-#     return predict_objects(image_np)
-
-# def predict_objects(image_np):
-#     results = detection_model(image_np)
-
-#     # Different object detection models may have additional results
-#     result = {key: value.numpy() for key, value in results.items()}
-
-#     label_id_offset = 0
-#     image_np_with_detections = image_np.copy()
-
-#     viz_utils.visualize_boxes_and_labels_on_image_array(
-#         image_np_with_detections[0],
-#         result['detection_boxes'][0],
-#         (result['detection_classes'][0] + label_id_offset).astype(int),
-#         result['detection_scores'][0],
-#         category_index,
-#         use_normalized_coordinates=True,
-#         max_boxes_to_draw=200,
-#         min_score_thresh=0.60,
-#         agnostic_mode=False,
-#         line_thickness=2
-#     )
-
-#     result_pil_img = tf.keras.utils.array_to_img(image_np_with_detections[0])
-#     return result_pil_img
-
-# def predict_video(video_path):
-#     cap = cv2.VideoCapture(video_path)
-#     frame_width = int(cap.get(3))
-#     frame_height = int(cap.get(4))
-
-#     # Define the codec and create a video writer object
-#     out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width, frame_height))
-
-#     while cap.isOpened():
-#         ret, frame = cap.read()
-#         if not ret:
-#             break
-
-#         # Convert the frame to PIL image
-#         pil_image = Image.fromarray(frame)
-
-#         # Perform object detection on the frame
-#         result_pil_img = predict_objects(frame)
-
-#         # Convert the result back to a NumPy array
-#         result_np_img = tf.keras.utils.img_to_array(result_pil_img)
-
-#         # Write the frame with detected objects to the video output
-#         out.write(result_np_img.astype('uint8'))
-
-#     # Release the video capture and writer objects
-#     cap.release()
-#     out.release()
-
-#     return "output.avi"
-
-# REPO_ID = "Louisw3399/burgerorfriesdetector"
-# detection_model = load_model()
-
-# gr.Interface(
-#     fn=predict,
-#     inputs=gr.Image(type="pil"),
-#     outputs=gr.Image(type="pil"),
-#     label="Image Object Detection"
-# ).launch(share=True)
-
-# gr.Interface(
-#     fn=predict_video,
-#     inputs=gr.Video(type="file", label="Upload a video"),
-#     outputs=gr.Video(type="file", label="Download the processed video"),
-#     label="Video Object Detection"
-# ).launch(share=True)
-
-
+
+REPO_ID = "Louisw3399/burgerorfriesdetector"
+detection_model = load_model()
+# pil_image = Image.open(image_path)
+# image_arr = pil_image_as_numpy_array(pil_image)
+
+# predicted_img = predict(image_arr)
+# predicted_img.save('predicted.jpg')
+
+gr.Interface(fn=predict,
+             inputs=gr.Image(type="pil"),
+             outputs=gr.Image(type="pil")
+             ).launch(share=True)
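
For context, the surviving code at the bottom of the file calls load_model() and hands predict to gr.Interface, but both functions are defined in the unchanged portion of app.py that this diff does not show. The sketch below is a hedged reconstruction of how those pieces plausibly fit together, pieced together from the commented-out code deleted in this commit; it is an approximation, not the file's actual contents.

import os
import numpy as np
import tensorflow as tf
import gradio as gr
from huggingface_hub import snapshot_download
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils

# Hypothetical reconstruction based on the commented-out code removed above.
PATH_TO_LABELS = 'data/label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(
    PATH_TO_LABELS, use_display_name=True)

REPO_ID = "Louisw3399/burgerorfriesdetector"

def pil_image_as_numpy_array(pilimg):
    # PIL image -> batched array of shape (1, H, W, 3)
    img_array = tf.keras.utils.img_to_array(pilimg)
    return np.expand_dims(img_array, axis=0)

def load_model():
    # Pull the exported SavedModel from the Hub and load it
    download_dir = snapshot_download(REPO_ID)
    saved_model_dir = os.path.join(download_dir, "saved_model")
    return tf.saved_model.load(saved_model_dir)

detection_model = load_model()

def predict(pilimg):
    image_np = pil_image_as_numpy_array(pilimg)
    results = detection_model(image_np)
    result = {key: value.numpy() for key, value in results.items()}

    # Draw boxes, class labels, and scores onto a copy of the input
    image_np_with_detections = image_np.copy()
    viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections[0],
        result['detection_boxes'][0],
        result['detection_classes'][0].astype(int),
        result['detection_scores'][0],
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=200,
        min_score_thresh=0.60,
        agnostic_mode=False,
        line_thickness=2)
    return tf.keras.utils.array_to_img(image_np_with_detections[0])

gr.Interface(fn=predict,
             inputs=gr.Image(type="pil"),
             outputs=gr.Image(type="pil")).launch(share=True)

Fetching weights with snapshot_download at startup means the Space ships no model files of its own; the first boot pays the download cost and later boots hit the local Hub cache.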