Upload app.py
app.py CHANGED
@@ -11,14 +11,20 @@ import torch
 
 IMAGE_SIZE = 640
 
-model_path=hf_hub_download(…
+model_path = hf_hub_download(
+    "kadirnar/deprem_model_v1", filename="last.pt", revision="main"
+)
 
 
-current_device=…
-model_types=["YOLOv5","YOLOv5 + SAHI"]
+current_device = "cuda" if torch.cuda.is_available() else "cpu"
+model_types = ["YOLOv5", "YOLOv5 + SAHI"]
 # Model
 model = AutoDetectionModel.from_pretrained(
-    model_type="yolov5",…
+    model_type="yolov5",
+    model_path=model_path,
+    device=current_device,
+    confidence_threshold=0.5,
+    image_size=IMAGE_SIZE,
 )
 
 
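Taken together, the added lines in this hunk download the checkpoint from the Hub and hand it to SAHI's model factory. A minimal standalone sketch of the same flow (assuming a sahi release where AutoDetectionModel is exported from the package root, plus the yolov5 pip package):

```python
# Sketch of the new model-loading flow. hf_hub_download caches the file
# locally and returns its path; from_pretrained wraps the checkpoint in a
# SAHI detection model.
import torch
from huggingface_hub import hf_hub_download
from sahi import AutoDetectionModel

IMAGE_SIZE = 640
model_path = hf_hub_download(
    "kadirnar/deprem_model_v1", filename="last.pt", revision="main"
)
current_device = "cuda" if torch.cuda.is_available() else "cpu"

model = AutoDetectionModel.from_pretrained(
    model_type="yolov5",        # dispatch to the YOLOv5 backend
    model_path=model_path,      # local path from hf_hub_download
    device=current_device,
    confidence_threshold=0.5,   # drop detections below this score
    image_size=IMAGE_SIZE,      # inference resolution
)
```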
@@ -27,15 +33,15 @@ def sahi_yolo_inference(
     image,
     slice_height=512,
     slice_width=512,
-    overlap_height_ratio=0.…
-    overlap_width_ratio=0.…
-    postprocess_type="…
-    postprocess_match_metric="…
-    postprocess_match_threshold=0.…
+    overlap_height_ratio=0.1,
+    overlap_width_ratio=0.1,
+    postprocess_type="NMS",
+    postprocess_match_metric="IOU",
+    postprocess_match_threshold=0.25,
     postprocess_class_agnostic=False,
 ):
 
-    #image_width, image_height = image.size
+    # image_width, image_height = image.size
     # sliced_bboxes = sahi.slicing.get_slice_bboxes(
     #     image_height,
     #     image_width,
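The commented-out block around this hunk refers to sahi.slicing.get_slice_bboxes, which the author used to guard against over-slicing on Spaces hardware. It is also a handy way to sanity-check how many windows the new 0.1 overlap ratios produce; a small sketch (the 1920x1080 size is an arbitrary example):

```python
# Sketch: count the slices the new defaults generate for a 1920x1080 image.
# get_slice_bboxes returns one [xmin, ymin, xmax, ymax] box per slice.
from sahi.slicing import get_slice_bboxes

slice_bboxes = get_slice_bboxes(
    image_height=1080,
    image_width=1920,
    slice_height=512,
    slice_width=512,
    overlap_height_ratio=0.1,
    overlap_width_ratio=0.1,
)
print(len(slice_bboxes))  # number of 512x512 windows covering the image
```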
@@ -50,22 +56,21 @@ def sahi_yolo_inference(
     #         f"{len(sliced_bboxes)} slices are too much for huggingface spaces, try smaller slice size."
     #     )
 
-    …
-    text_th = None or max(rect_th - 1, 1)
+    rect_th = None or max(round(sum(image.size) / 2 * 0.0001), 1)
+    text_th = None or max(rect_th - 2, 1)
 
     if "SAHI" in model_type:
         prediction_result_2 = sahi.predict.get_sliced_prediction(
-            …
+            image=image,
+            detection_model=model,
+            slice_height=int(slice_height),
+            slice_width=int(slice_width),
+            overlap_height_ratio=overlap_height_ratio,
+            overlap_width_ratio=overlap_width_ratio,
+            postprocess_type=postprocess_type,
+            postprocess_match_metric=postprocess_match_metric,
+            postprocess_match_threshold=postprocess_match_threshold,
+            postprocess_class_agnostic=postprocess_class_agnostic,
         )
         visual_result_2 = sahi.utils.cv.visualize_object_predictions(
             image=numpy.array(image),
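The sliced branch now spells out every keyword it forwards to sahi.predict.get_sliced_prediction, which returns a result whose object_prediction_list feeds the visualizer. A sketch of exercising the same call outside Gradio (the filename and the `model` object come from earlier hunks; the bbox attribute names follow SAHI's BoundingBox class):

```python
# Sketch: run the sliced inference path directly and inspect predictions.
from PIL import Image
import sahi.predict

image = Image.open("26.jpg")  # one of the Space's example images
result = sahi.predict.get_sliced_prediction(
    image=image,
    detection_model=model,    # AutoDetectionModel from the first hunk
    slice_height=512,
    slice_width=512,
    overlap_height_ratio=0.1,
    overlap_width_ratio=0.1,
)
# Each ObjectPrediction carries a category, score, and bounding box.
for pred in result.object_prediction_list:
    box = pred.bbox
    print(pred.category.name, pred.score.value,
          box.minx, box.miny, box.maxx, box.maxy)
```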
@@ -94,41 +99,39 @@ def sahi_yolo_inference(
 # sliced inference
 
 
-…
 inputs = [
-    gr.…
+    gr.Dropdown(
+        choices=model_types,
+        label="Choose Model Type",
+        type="value",
+        value=model_types[1],
+    ),
+    gr.Image(type="pil", label="Original Image"),
+    gr.Number(default=512, label="slice_height"),
+    gr.Number(default=512, label="slice_width"),
+    gr.Number(default=0.1, label="overlap_height_ratio"),
+    gr.Number(default=0.1, label="overlap_width_ratio"),
-    gr.…
+    gr.Dropdown(
         ["NMS", "GREEDYNMM"],
         type="value",
-        …
+        value="NMS",
         label="postprocess_type",
     ),
-    gr.…
-    ),
-    gr.inputs.Number(default=0.5, label="postprocess_match_threshold"),
-    gr.inputs.Checkbox(default=True, label="postprocess_class_agnostic"),
+    gr.Dropdown(["IOU", "IOS"], type="value", value="IOU", label="postprocess_type"),
+    gr.Number(default=0.5, label="postprocess_match_threshold"),
+    gr.Checkbox(default=True, label="postprocess_class_agnostic"),
 ]
 
-outputs = [
-    gr.outputs.Image(type="pil", label="Output")
-]
+outputs = [gr.outputs.Image(type="pil", label="Output")]
 
 title = "Small Object Detection with SAHI + YOLOv5"
 description = "SAHI + YOLOv5 demo for small object detection. Upload an image or click an example image to use."
 article = "<p style='text-align: center'>SAHI is a lightweight vision library for performing large scale object detection/ instance segmentation.. <a href='https://github.com/obss/sahi'>SAHI Github</a> | <a href='https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80'>SAHI Blog</a> | <a href='https://github.com/fcakyon/yolov5-pip'>YOLOv5 Github</a> </p>"
 examples = [
-    [model_types[…
-    [model_types[…
-    [model_types[…
-    [model_types[…
-    …
+    [model_types[1], "26.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
+    [model_types[1], "27.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
+    [model_types[1], "28.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
+    [model_types[1], "31.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
 ]
 gr.Interface(
     sahi_yolo_inference,
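This hunk migrates the UI from the Gradio 2.x gr.inputs/gr.outputs namespaces to the 3.x top-level components, but the new gr.Number and gr.Checkbox calls keep the 2.x default= keyword, and the second dropdown reuses the label "postprocess_type" where "postprocess_match_metric" seems intended. A version-consistent sketch of the same input list, under the assumption that the Space pins Gradio 3.x (where these components take value=):

```python
# Sketch: the same inputs written against the Gradio 3.x API, where
# components take value= (the 2.x gr.inputs classes used default=).
import gradio as gr

inputs = [
    gr.Dropdown(choices=model_types, type="value", value=model_types[1],
                label="Choose Model Type"),
    gr.Image(type="pil", label="Original Image"),
    gr.Number(value=512, label="slice_height"),
    gr.Number(value=512, label="slice_width"),
    gr.Number(value=0.1, label="overlap_height_ratio"),
    gr.Number(value=0.1, label="overlap_width_ratio"),
    gr.Dropdown(["NMS", "GREEDYNMM"], type="value", value="NMS",
                label="postprocess_type"),
    gr.Dropdown(["IOU", "IOS"], type="value", value="IOU",
                label="postprocess_match_metric"),  # label corrected here
    gr.Number(value=0.5, label="postprocess_match_threshold"),
    gr.Checkbox(value=True, label="postprocess_class_agnostic"),
]
outputs = [gr.Image(type="pil", label="Output")]
```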
@@ -139,4 +142,5 @@ gr.Interface(
     article=article,
     examples=examples,
     theme="huggingface",
-…
+    cache_examples=True
+).launch(debug=True, enable_queue=True, server_port=8500)
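The final hunk chains launch() directly onto the gr.Interface(...) constructor. Splitting the chain makes the launch options easier to scan; a sketch (note that enable_queue= as a launch() keyword belongs to the Gradio 3.x era, with later releases moving queueing to a separate .queue() call):

```python
# Sketch: the closing call chain as two statements.
demo = gr.Interface(
    sahi_yolo_inference,
    inputs,
    outputs,
    title=title,
    description=description,
    article=article,
    examples=examples,
    theme="huggingface",
    cache_examples=True,
)
demo.launch(debug=True, enable_queue=True, server_port=8500)
```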