from datasets import load_dataset
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats

# Additional Information for Depth and Camera Parameters
#
# Creating intrinsics for the camera
#   fov = 95.452621 # degrees
#   fx = (2448 / np.tan((fov*np.pi/180.0)/2.0)) / 2
#   intrinsics = o3d.camera.PinholeCameraIntrinsic(2448,2048,fx,fx,2448/2,2048/2)
# baseline = 3.88112 # cm
# Note: Depth is also in centimeters
#
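
# --- Optional: using the parameters above (hedged sketch, not part of the loader) ---
# The two helpers below are assumptions reconstructed only from the numbers quoted in
# the comment block: fx derived from the horizontal fov and the 2448x2048 image size,
# principal point at the image center, fy taken equal to fx, and depth/baseline treated
# as centimeters. They are illustrative, not an official API of this dataset.
def depth_to_points(depth, fov_deg=95.452621, width=2448, height=2048):
    """Back-project a depth map (cm, shape HxW) to camera-frame XYZ points in cm."""
    fx = (width / np.tan(np.deg2rad(fov_deg) / 2.0)) / 2.0
    cx, cy = width / 2.0, height / 2.0
    v, u = np.indices(depth.shape)          # pixel rows (v) and columns (u)
    z = depth.astype(np.float64)
    x = (u - cx) * z / fx
    y = (v - cy) * z / fx                   # fy assumed equal to fx
    return np.stack([x, y, z], axis=-1)     # (H, W, 3)

def depth_to_disparity(depth, baseline_cm=3.88112, fov_deg=95.452621, width=2448):
    """Convert depth (cm) to stereo disparity in pixels via disparity = fx * b / Z."""
    fx = (width / np.tan(np.deg2rad(fov_deg) / 2.0)) / 2.0
    return fx * baseline_cm / np.maximum(depth, 1e-6)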

def maj_vote(img, x, y, semantic_map, n=3):
    """Return the majority class id in an n x n window around (x, y), ignoring 255."""
    half = n // 2
    # clamp the window to the image bounds (x indexes columns, y indexes rows)
    x_min, x_max = max(0, x - half), min(img.shape[1], x + half + 1)
    y_min, y_max = max(0, y - half), min(img.shape[0], y + half + 1)

    window = img[y_min:y_max, x_min:x_max].flatten()
    window = window[window != 255]  # drop unlabeled pixels

    if len(window) > 0:
        # majority vote over the labeled neighbors
        most_common_label = stats.mode(window, keepdims=True)[0][0]
        return most_common_label
    else:
        # no labeled neighbors: fall back to the background id
        return semantic_map["background"][0]
    
def color_to_id(img_semantic, semantic_map, top_k_disease=10):
    """Remap the rendered gray values (img_id) to training class ids (new_id)."""
    # 255 marks pixels that are still unlabeled after remapping
    semantic_id_img = np.ones(img_semantic.shape) * 255
    disease_counts = []
    # remap rendered color to semantic id
    for _, id_value_map in semantic_map.items():
        # track disease pixel counts (img_id in 2..59) for top_k_disease filtering;
        # these entries come first in semantic_map, so the list indices line up below
        if 1 < id_value_map[1] < 60:
            disease_counts.append(np.sum(img_semantic == id_value_map[1]))
        semantic_id_img[img_semantic == id_value_map[1]] = id_value_map[0]
    # keep only the top_k_disease most common disease labels, mark the rest unlabeled
    for i, item_i in enumerate(np.argsort(disease_counts)[::-1]):
        if i >= top_k_disease:
            id_value_map = list(semantic_map.items())[item_i][1]
            semantic_id_img[img_semantic == id_value_map[1]] = 255

    # Apply majority voting for unlabeled pixels (needed as the rendering process can blend pixels)
    unknown_mask = (semantic_id_img == 255)
    for y, x in np.argwhere(unknown_mask):
        semantic_id_img[y, x] = maj_vote(semantic_id_img, x, y, semantic_map, 3)
    return semantic_id_img
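
# --- Optional: persisting the remapped mask (hedged sketch, an assumption) ---------
# The semantic_map comment below notes the ids follow a cityscapes-style layout for
# mmsegmentation; a common way to consume color_to_id() output is to save it as a
# single-channel uint8 PNG of class ids. The helper name and use of Pillow are
# illustrative assumptions; this function is not called anywhere in this script.
def save_id_mask(semantic_id_img, out_path):
    from PIL import Image
    Image.fromarray(semantic_id_img.astype(np.uint8)).save(out_path)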

if __name__ == "__main__":
    # similar to cityscapes for mmsegmentation
    # class name, (new_id, img_id)
    semantic_map = {
        "bacterial_spot":         (0, 5),
        "early_blight":           (1, 10),
        "late_blight":            (2, 20),
        "leaf_mold":              (3, 25),
        "septoria_leaf_spot":     (4, 30),
        "spider_mites":           (5, 35),
        "target_spot":            (6, 40),
        "mosaic_virus":           (7, 45),
        "yellow_leaf_curl_virus": (8, 50),
        "healthy_leaf_pv":        (9, 15),   # plant village healthy leaf
        "healthy_leaf_t":         (9, 255),  # texture leaf (healthy)
        "background":             (10, 0),
        "tomato":                 (11, 121),
        "stem":                   (12, 111),
        "wood_rod":               (13, 101),
        "red_band":               (14, 140),
        "yellow_flower":          (15, 131),
    }
    # load the 'sample' configuration; the dataset uses a loading script, hence trust_remote_code=True
    dataset = load_dataset("xingjianli/tomatotest", 'sample', trust_remote_code=True, num_proc=4)
    sample = dataset["train"][0]
    print(sample)

    left_rgb_img = sample['left_rgb']
    right_rgb_img = sample['right_rgb']
    left_semantic_img = np.asarray(sample['left_semantic'])
    left_instance_img = np.asarray(sample['left_instance'])
    left_depth_img = np.asarray(sample['left_depth'])
    right_depth_img = np.asarray(sample['right_depth'])

    # 2x3 grid: RGB pair, remapped semantics, depth pair (clipped for display), instances
    plt.subplot(231)
    plt.imshow(left_rgb_img)
    plt.subplot(232)
    plt.imshow(right_rgb_img)
    plt.subplot(233)
    plt.imshow(color_to_id(left_semantic_img, semantic_map))
    plt.subplot(234)
    plt.imshow(np.where(left_depth_img > 500, 0, left_depth_img))   # zero out depth beyond 500 cm
    plt.subplot(235)
    plt.imshow(np.where(right_depth_img > 500, 0, right_depth_img))
    plt.subplot(236)
    plt.imshow(left_instance_img)
    plt.show()