Spaces: Runtime error
first commit
- README.md +0 -10
- app.py +148 -0
- end2end.onnx +3 -0
- requirements.txt +6 -0
README.md
CHANGED
@@ -1,13 +1,3 @@
 ---
-title: OpenLanderONNXonline
-emoji: ⚡
-colorFrom: gray
-colorTo: purple
-sdk: streamlit
-sdk_version: 1.21.0
-app_file: app.py
-pinned: false
 license: mit
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 ---
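Note that this commit strips the Spaces front matter down to the license alone. The deleted `sdk`, `sdk_version`, and `app_file` keys are what tell Spaces how to build and launch the app (see the configuration reference linked in the removed text), so their removal is one plausible contributor to the "Runtime error" status above. A minimal sketch of the front matter this Streamlit Space would need, reusing only values that appear in the removed lines:

    ---
    title: OpenLanderONNXonline
    sdk: streamlit
    sdk_version: 1.21.0
    app_file: app.py
    license: mit
    ---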
app.py
ADDED
@@ -0,0 +1,148 @@
+import cv2
+import numpy as np
+import os
+import torch
+import onnxruntime as ort
+import time
+from functools import wraps
+import argparse
+from PIL import Image
+from io import BytesIO
+import streamlit as st
+
+# Parse command-line arguments
+#parser = argparse.ArgumentParser()
+#parser.add_argument("--mosaic", help="Enable mosaic processing mode", action="store_true")
+#args = parser.parse_args()
+#mosaic = args.mosaic  # Set this based on your command line argument
+
+# For Streamlit use let's just set mosaic to True, but I'm leaving the command-line arg here for anyone to use
+
+mosaic = True
+
+def center_crop(img, new_height, new_width):
+    height, width, _ = img.shape
+    start_x = width//2 - new_width//2
+    start_y = height//2 - new_height//2
+    return img[start_y:start_y+new_height, start_x:start_x+new_width]
+
+
+def mosaic_crop(img, size):
+    height, width, _ = img.shape
+    padding_height = (size - height % size) % size
+    padding_width = (size - width % size) % size
+
+    padded_img = cv2.copyMakeBorder(img, 0, padding_height, 0, padding_width, cv2.BORDER_CONSTANT, value=[0, 0, 0])
+    tiles = [padded_img[x:x+size, y:y+size] for x in range(0, padded_img.shape[0], size) for y in range(0, padded_img.shape[1], size)]
+
+    return tiles, padded_img.shape[0] // size, padded_img.shape[1] // size, padding_height, padding_width
+
+def stitch_tiles(tiles, rows, cols, size):
+    return np.concatenate([np.concatenate([tiles[i*cols + j] for j in range(cols)], axis=1) for i in range(rows)], axis=0)
+
+
+def timing_decorator(func):
+    @wraps(func)
+    def wrapper(*args, **kwargs):
+        start_time = time.time()
+        result = func(*args, **kwargs)
+        end_time = time.time()
+
+        duration = end_time - start_time
+        print(f"Function '{func.__name__}' took {duration:.6f} seconds")
+        return result
+
+    return wrapper
+
+@timing_decorator
+def process_image(session, img, colors, mosaic=False):
+    if not mosaic:
+        # Crop the center of the image to 416x416 pixels
+        img = center_crop(img, 416, 416)
+    blob = cv2.dnn.blobFromImage(img, 1/255.0, (416, 416), swapRB=True, crop=False)
+
+    # Perform inference
+    output = session.run(None, {session.get_inputs()[0].name: blob})
+
+    # Assuming the output is a probability map where higher values indicate higher probability of a class
+    output_img = output[0].squeeze(0).transpose(1, 2, 0)
+    output_img = (output_img * 122).clip(0, 255).astype(np.uint8)
+    output_mask = output_img.max(axis=2)
+
+    output_mask_color = np.zeros((416, 416, 3), dtype=np.uint8)
+
+    # Assign specific colors to the classes in the mask
+    for class_idx in np.unique(output_mask):
+        if class_idx in colors:
+            output_mask_color[output_mask == class_idx] = colors[class_idx]
+
+    # Mask for the transparent class
+    transparent_mask = (output_mask == 122)
+
+    # Convert the mask to a 3-channel image
+    transparent_mask = np.stack([transparent_mask]*3, axis=-1)
+
+    # Where the mask is True, set the output color image to the input image
+    output_mask_color[transparent_mask] = img[transparent_mask]
+
+    # Make the colorful mask semi-transparent
+    overlay = cv2.addWeighted(img, 0.6, output_mask_color, 0.4, 0)
+
+    return overlay
+
+
+# CUDA is used automatically if an NVIDIA GPU is available
+cuda = torch.cuda.is_available()
+
+if cuda:
+    print("We have a GPU!")
+providers = ['CUDAExecutionProvider'] if cuda else ['CPUExecutionProvider']
+
+session = ort.InferenceSession('end2end.onnx', providers=providers)
+
+
+# Define colors for classes 0, 122 and 244
+colors = {0: (0, 0, 255), 122: (0, 0, 0), 244: (0, 255, 255)}  # Red, Black, Yellow (BGR)
+
+def load_image(uploaded_file):
+    try:
+        image = Image.open(uploaded_file)
+        return cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
+    except Exception as e:
+        st.write("Could not load image: ", e)
+        return None
+
+
+st.title("OpenLander ONNX app")
+st.write("Upload an image to process with the ONNX OpenLander model!")
+st.write("Bear in mind that this model is **much less refined** than the embedded models at the moment.")
+
+
+uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png"])
+if uploaded_file is not None:
+    img = load_image(uploaded_file)
+    if img is not None and img.shape[2] == 4:
+        img = img[:, :, :3]  # Drop the alpha channel if it exists
+    img_processed = None
+
+    if st.button('Process'):
+        with st.spinner('Processing...'):
+            start = time.time()
+            if mosaic:
+                tiles, rows, cols, padding_height, padding_width = mosaic_crop(img, 416)
+                processed_tiles = [process_image(session, tile, colors, mosaic=True) for tile in tiles]
+                overlay = stitch_tiles(processed_tiles, rows, cols, 416)
+
+                # Crop the padding back out
+                overlay = overlay[:overlay.shape[0]-padding_height, :overlay.shape[1]-padding_width]
+                img_processed = overlay
+            else:
+                img_processed = process_image(session, img, colors)
+            end = time.time()
+        st.write(f"Processing time: {end - start} seconds")
+
+    st.image(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), caption='Uploaded Image.', use_column_width=True)
+
+    if img_processed is not None:
+        st.image(cv2.cvtColor(img_processed, cv2.COLOR_BGR2RGB), caption='Processed Image.', use_column_width=True)
+        st.write("Red => obstacle ||| Yellow => Human obstacle ||| no color => clear for landing or delivery")
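For readers who want to sanity-check the tiling logic outside Streamlit, here is a minimal standalone sketch of the mosaic round trip: `mosaic_crop` and `stitch_tiles` are copied from app.py above, and the random test image is a stand-in for an uploaded photo.

    import cv2
    import numpy as np

    def mosaic_crop(img, size):
        # Pad so both dimensions are multiples of `size`, then cut into tiles (row-major)
        height, width, _ = img.shape
        padding_height = (size - height % size) % size
        padding_width = (size - width % size) % size
        padded = cv2.copyMakeBorder(img, 0, padding_height, 0, padding_width,
                                    cv2.BORDER_CONSTANT, value=[0, 0, 0])
        tiles = [padded[r:r+size, c:c+size]
                 for r in range(0, padded.shape[0], size)
                 for c in range(0, padded.shape[1], size)]
        return tiles, padded.shape[0] // size, padded.shape[1] // size, padding_height, padding_width

    def stitch_tiles(tiles, rows, cols, size):
        # Reassemble the row-major tile list into one image
        return np.concatenate([np.concatenate([tiles[i*cols + j] for j in range(cols)], axis=1)
                               for i in range(rows)], axis=0)

    # Round trip: tile at 416, stitch, crop the padding back out, compare
    img = np.random.randint(0, 255, (500, 700, 3), dtype=np.uint8)
    tiles, rows, cols, ph, pw = mosaic_crop(img, 416)
    stitched = stitch_tiles(tiles, rows, cols, 416)
    restored = stitched[:stitched.shape[0] - ph, :stitched.shape[1] - pw]
    assert np.array_equal(restored, img)  # tiling + stitching is lossless

This is why the app can crop `padding_height` and `padding_width` off the stitched overlay and recover an image the same size as the upload.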
end2end.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:812ee73706c48fb9ec4d17aa267488bb37adbdc1cbab484223042b5b82c17a0c
+size 11185635
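end2end.onnx is stored via Git LFS, so the three lines above are the pointer, not the model itself. After fetching the real file (e.g. `git lfs pull` in a clone of the Space), the pointer's oid and size can be verified locally; a minimal sketch using only the values shown above:

    import hashlib
    import os

    path = "end2end.onnx"
    expected_oid = "812ee73706c48fb9ec4d17aa267488bb37adbdc1cbab484223042b5b82c17a0c"
    expected_size = 11185635

    # A pointer file is ~130 bytes; the real model is ~11 MB
    assert os.path.getsize(path) == expected_size, "size mismatch (still the LFS pointer?)"
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    assert digest == expected_oid, "sha256 mismatch"
    print("LFS object verified")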
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+numpy==1.23.4
+onnx==1.13.1
+onnxruntime==1.14.1
+opencv-contrib-python==4.5.5.62
+opencv-python-headless==4.7.0.72
+protobuf~=3.20.2
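One plausible explanation for the "Runtime error" status: app.py imports torch, PIL (Pillow), and streamlit, none of which are pinned above. Streamlit is normally installed by the Spaces runtime when the README front matter declares `sdk: streamlit`, but that declaration was removed in this same commit. A hedged sketch of the missing entries; leaving torch and Pillow unpinned is an assumption, and 1.21.0 simply reuses the `sdk_version` removed from README.md:

    torch              # used only for torch.cuda.is_available()
    Pillow             # provides PIL.Image
    streamlit==1.21.0  # matches the sdk_version removed from README.md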