Datasets:
Size:
10K<n<100K
License:
add camera parameters
Browse files- example_load.py +56 -46
example_load.py
CHANGED
@@ -2,29 +2,18 @@ from datasets import load_dataset
|
|
2 |
import matplotlib.pyplot as plt
|
3 |
import numpy as np
|
4 |
from scipy import stats
|
5 |
-
# similar to cityscapes for mmsegmentation
|
6 |
-
# class name, (new_id, img_id)
|
7 |
-
semantic_map = {
|
8 |
-
"bacterial_spot": (0, 5),
|
9 |
-
"early_blight": (1, 10),
|
10 |
-
"late_blight": (2, 20),
|
11 |
-
"leaf_mold": (3, 25),
|
12 |
-
"septoria_leaf_spot": (4,30),
|
13 |
-
"spider_mites": (5,35),
|
14 |
-
"target_spot": (6,40),
|
15 |
-
"mosaic_virus": (7,45),
|
16 |
-
"yellow_leaf_curl_virus":(8,50),
|
17 |
-
"healthy_leaf_pv": (9, 15), # plant village healthy leaf
|
18 |
-
"healthy_leaf_t": (9, 255), # texture leaf (healthy)
|
19 |
-
"background": (10, 0),
|
20 |
-
"tomato": (11, 121),
|
21 |
-
"stem": (12, 111),
|
22 |
-
"wood_rod": (13, 101),
|
23 |
-
"red_band": (14, 140),
|
24 |
-
"yellow_flower": (15, 131)
|
25 |
-
}
|
26 |
|
27 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
28 |
half = n // 2
|
29 |
x_min, x_max = max(0, x - half), min(img.shape[1], x + half + 1)
|
30 |
y_min, y_max = max(0, y - half), min(img.shape[0], y + half + 1)
|
@@ -39,7 +28,7 @@ def maj_vote(img,x,y,n=3):
|
|
39 |
else:
|
40 |
return semantic_map["background"][0]
|
41 |
|
42 |
-
def color_to_id(img_semantic, top_k_disease = 10, semantic_map = semantic_map):
|
43 |
semantic_id_img = np.ones(img_semantic.shape) * 255
|
44 |
disease_counts = []
|
45 |
# remap rendered color to semantic id
|
@@ -57,32 +46,53 @@ def color_to_id(img_semantic, top_k_disease = 10, semantic_map = semantic_map):
|
|
57 |
# Apply majority voting for unlabeled pixels (needed as the rendering process can blend pixels)
|
58 |
unknown_mask = (semantic_id_img == 255)
|
59 |
for y,x in np.argwhere(unknown_mask):
|
60 |
-
semantic_id_img[y, x] = maj_vote(semantic_id_img, x, y, 3)
|
61 |
return semantic_id_img
|
62 |
|
63 |
-
|
64 |
-
|
65 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
66 |
|
67 |
|
68 |
-
left_rgb_img = dataset["train"][0]['left_rgb']
|
69 |
-
right_rgb_img = dataset["train"][0]['right_rgb']
|
70 |
-
left_semantic_img = np.asarray(dataset["train"][0]['left_semantic'])
|
71 |
-
left_instance_img = np.asarray(dataset["train"][0]['left_instance'])
|
72 |
-
left_depth_img = np.asarray(dataset["train"][0]['left_depth'])
|
73 |
-
right_depth_img = np.asarray(dataset["train"][0]['right_depth'])
|
74 |
-
plt.subplot(231)
|
75 |
-
plt.imshow(left_rgb_img)
|
76 |
-
plt.subplot(232)
|
77 |
-
plt.imshow(right_rgb_img)
|
78 |
-
plt.subplot(233)
|
79 |
-
plt.imshow(color_to_id(left_semantic_img))
|
80 |
-
plt.subplot(234)
|
81 |
-
plt.imshow(np.where(left_depth_img>500,0,left_depth_img))
|
82 |
-
plt.subplot(235)
|
83 |
-
plt.imshow(np.where(right_depth_img>500,0,right_depth_img))
|
84 |
-
plt.subplot(236)
|
85 |
-
plt.imshow(left_instance_img)
|
86 |
-
plt.show()
|
87 |
|
88 |
|
|
|
2 |
import matplotlib.pyplot as plt
|
3 |
import numpy as np
|
4 |
from scipy import stats
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
|
6 |
+
# Additional Information for Depth and Camera Parameters
|
7 |
+
#
|
8 |
+
# Creating intrinsics for the camera
|
9 |
+
# fov = 95.452621 # degrees
|
10 |
+
# fx = (2448 / np.tan((fov*np.pi/180.0)/2.0)) / 2
|
11 |
+
# intrinsics = o3d.camera.PinholeCameraIntrinsic(2448,2048,fx,fx,2448/2,2048/2)
|
12 |
+
# baseline = 3.88112 # cm
|
13 |
+
# Note: Depth is also in centimeters
|
14 |
+
#
|
15 |
+
|
16 |
+
def maj_vote(img,x,y,semantic_map,n=3):
|
17 |
half = n // 2
|
18 |
x_min, x_max = max(0, x - half), min(img.shape[1], x + half + 1)
|
19 |
y_min, y_max = max(0, y - half), min(img.shape[0], y + half + 1)
|
|
|
28 |
else:
|
29 |
return semantic_map["background"][0]
|
30 |
|
31 |
+
def color_to_id(img_semantic, semantic_map, top_k_disease = 10):
|
32 |
semantic_id_img = np.ones(img_semantic.shape) * 255
|
33 |
disease_counts = []
|
34 |
# remap rendered color to semantic id
|
|
|
46 |
# Apply majority voting for unlabeled pixels (needed as the rendering process can blend pixels)
|
47 |
unknown_mask = (semantic_id_img == 255)
|
48 |
for y,x in np.argwhere(unknown_mask):
|
49 |
+
semantic_id_img[y, x] = maj_vote(semantic_id_img, x, y, semantic_map, 3)
|
50 |
return semantic_id_img
|
51 |
|
52 |
+
if __name__ == "__main__":
|
53 |
+
# similar to cityscapes for mmsegmentation
|
54 |
+
# class name, (new_id, img_id)
|
55 |
+
semantic_map = {
|
56 |
+
"bacterial_spot": (0, 5),
|
57 |
+
"early_blight": (1, 10),
|
58 |
+
"late_blight": (2, 20),
|
59 |
+
"leaf_mold": (3, 25),
|
60 |
+
"septoria_leaf_spot": (4,30),
|
61 |
+
"spider_mites": (5,35),
|
62 |
+
"target_spot": (6,40),
|
63 |
+
"mosaic_virus": (7,45),
|
64 |
+
"yellow_leaf_curl_virus":(8,50),
|
65 |
+
"healthy_leaf_pv": (9, 15), # plant village healthy leaf
|
66 |
+
"healthy_leaf_t": (9, 255), # texture leaf (healthy)
|
67 |
+
"background": (10, 0),
|
68 |
+
"tomato": (11, 121),
|
69 |
+
"stem": (12, 111),
|
70 |
+
"wood_rod": (13, 101),
|
71 |
+
"red_band": (14, 140),
|
72 |
+
"yellow_flower": (15, 131)
|
73 |
+
}
|
74 |
+
dataset = load_dataset("xingjianli/tomatotest", 'sample',trust_remote_code=True, num_proc=4)
|
75 |
+
print(dataset["train"][0])
|
76 |
|
77 |
|
78 |
+
left_rgb_img = dataset["train"][0]['left_rgb']
|
79 |
+
right_rgb_img = dataset["train"][0]['right_rgb']
|
80 |
+
left_semantic_img = np.asarray(dataset["train"][0]['left_semantic'])
|
81 |
+
left_instance_img = np.asarray(dataset["train"][0]['left_instance'])
|
82 |
+
left_depth_img = np.asarray(dataset["train"][0]['left_depth'])
|
83 |
+
right_depth_img = np.asarray(dataset["train"][0]['right_depth'])
|
84 |
+
plt.subplot(231)
|
85 |
+
plt.imshow(left_rgb_img)
|
86 |
+
plt.subplot(232)
|
87 |
+
plt.imshow(right_rgb_img)
|
88 |
+
plt.subplot(233)
|
89 |
+
plt.imshow(color_to_id(left_semantic_img, semantic_map))
|
90 |
+
plt.subplot(234)
|
91 |
+
plt.imshow(np.where(left_depth_img>500,0,left_depth_img))
|
92 |
+
plt.subplot(235)
|
93 |
+
plt.imshow(np.where(right_depth_img>500,0,right_depth_img))
|
94 |
+
plt.subplot(236)
|
95 |
+
plt.imshow(left_instance_img)
|
96 |
+
plt.show()
|
97 |
|
98 |
|