Hugo HE committed on
Commit 717802d · 1 Parent(s): 13ad4fb
Base-RCNN-FPN.yaml ADDED
@@ -0,0 +1,50 @@
+ MODEL:
+   META_ARCHITECTURE: "GeneralizedRCNN"
+
+   BACKBONE:
+     NAME: "build_resnet_fpn_backbone"
+
+   RESNETS:
+     OUT_FEATURES: ["res2", "res3", "res4", "res5"]
+
+   FPN:
+     IN_FEATURES: ["res2", "res3", "res4", "res5"]
+
+   ANCHOR_GENERATOR:
+     # One size for each in feature map
+     SIZES: [[32], [64], [128], [256], [512]]
+     # Three aspect ratios (same for all in feature maps)
+     ASPECT_RATIOS: [[0.5, 1.0, 2.0]]
+
+   RPN:
+     IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"]
+     PRE_NMS_TOPK_TRAIN: 2000  # Per FPN level
+     PRE_NMS_TOPK_TEST: 1000   # Per FPN level
+     # Detectron1 uses 2000 proposals per-batch,
+     # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue)
+     # which is approximately 1000 proposals per-image since the default
+     # batch size for FPN is 2.
+     POST_NMS_TOPK_TRAIN: 1000
+     POST_NMS_TOPK_TEST: 1000
+
+   ROI_HEADS:
+     NAME: "StandardROIHeads"
+     IN_FEATURES: ["p2", "p3", "p4", "p5"]
+
+   ROI_BOX_HEAD:
+     NAME: "FastRCNNConvFCHead"
+     NUM_FC: 2
+     POOLER_RESOLUTION: 7
+
+   ROI_MASK_HEAD:
+     NAME: "MaskRCNNConvUpsampleHead"
+     NUM_CONV: 4
+     POOLER_RESOLUTION: 14
+
+ INPUT:
+   MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+
+ SOLVER:
+   CHECKPOINT_PERIOD: 210000
+
+ VERSION: 2
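
A quick way to sanity-check this base config (not part of the commit) is to load it with detectron2's get_cfg/merge_from_file, the same API app.py uses further down; the printed values follow directly from the file:

from detectron2.config import get_cfg

cfg = get_cfg()
cfg.merge_from_file("Base-RCNN-FPN.yaml")
print(cfg.MODEL.ROI_HEADS.NAME)          # StandardROIHeads
print(cfg.MODEL.RPN.POST_NMS_TOPK_TEST)  # 1000
print(cfg.MODEL.ANCHOR_GENERATOR.SIZES)  # [[32], [64], [128], [256], [512]]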
abstractions/.DS_Store ADDED
Binary file (6.15 kB).
 
abstractions/Box.py ADDED
@@ -0,0 +1,89 @@
+ from copy import deepcopy
+
+ class Box:
+     def __init__(self):
+         self.dimensions = None
+         self.ivals = []
+         self.element_indexes = []  # record which samples this box was built from
+         self.low_bound_indexes = dict()   # record which samples visit the low bound of each dimension
+         self.high_bound_indexes = dict()  # record which samples visit the high bound of each dimension
+
+     def build(self, dimensions, points):
+         # a point is a tuple (index, n-dim numpy array)
+         # index = point[0]
+         # value = point[1]
+         piter = iter(points)
+         self.dimensions = dimensions
+         self.ivals = []
+         self.element_indexes = []
+         self.low_bound_indexes = dict()
+         self.high_bound_indexes = dict()
+
+         try:
+             point = next(piter)
+         except StopIteration:
+             return
+         else:
+             self.element_indexes.append(point[0])  # update index list
+             i = 0
+             for coord in point[1]:
+                 if i >= self.dimensions:
+                     break
+                 self.ivals.append([coord, coord])
+                 self.low_bound_indexes["n" + str(i + 1)] = [point[0]]   # update low-bound visiting index list
+                 self.high_bound_indexes["n" + str(i + 1)] = [point[0]]  # update upper-bound visiting index list
+                 i += 1
+             if len(self.ivals) != self.dimensions:
+                 raise ValueError("IllegalArgument: point dimensionality does not match the requested dimensions")
+
+         while True:
+             try:
+                 point = next(piter)
+             except StopIteration:
+                 break
+             else:
+                 self.element_indexes.append(point[0])  # update index list
+                 i = 0
+                 for coord in point[1]:
+                     if i >= self.dimensions:
+                         break
+                     ival = self.ivals[i]
+                     if coord < ival[0]:
+                         ival[0] = coord
+                         self.low_bound_indexes["n" + str(i + 1)] = [point[0]]  # update the bound and its index
+                     elif coord == ival[0]:
+                         low_index_list = self.low_bound_indexes["n" + str(i + 1)]
+                         low_index_list.append(point[0])
+
+                     if coord > ival[1]:
+                         ival[1] = coord
+                         self.high_bound_indexes["n" + str(i + 1)] = [point[0]]  # update the bound and its index
+                     elif coord == ival[1]:
+                         high_index_list = self.high_bound_indexes["n" + str(i + 1)]
+                         high_index_list.append(point[0])
+                     i += 1
+
+     def query(self, point):
+         # True if the point lies inside the box in every monitored dimension
+         i = 0
+         for coord in point:
+             if i >= self.dimensions:
+                 break
+             ival = self.ivals[i]
+             if coord < ival[0] or coord > ival[1]:
+                 return False
+             i += 1
+         return True
+
+     def __str__(self):
+         return self.ivals.__str__()
+
+
+ def boxes_query(point, boxes):
+     # True if the point falls inside any non-empty box
+     for box in boxes:
+         if len(box.ivals):
+             if box.query(point):
+                 return True
+     return False
+
+
+
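
A minimal usage sketch of the Box abstraction above (not part of the commit; the sample points are made up). A point is a tuple (sample_index, feature_vector), as documented in build():

import numpy as np
from abstractions.Box import Box, boxes_query

points = [(0, np.array([0.0, 1.0])), (1, np.array([2.0, 3.0])), (2, np.array([1.0, 0.5]))]
box = Box()
box.build(2, points)                    # tight per-dimension intervals over the three points
print(box)                              # intervals per dimension, e.g. [[0.0, 2.0], [0.5, 3.0]]
print(box.query([1.0, 1.0]))            # True  -- inside every interval
print(boxes_query([5.0, 5.0], [box]))   # False -- outside every box in the list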
abstractions/__init__.py ADDED
@@ -0,0 +1 @@
+ from .Box import *
abstractions/__pycache__/Ball.cpython-38.pyc ADDED
Binary file (1.96 kB).
 
abstractions/__pycache__/Box.cpython-38.pyc ADDED
Binary file (2.04 kB).
 
abstractions/__pycache__/Box.cpython-39.pyc ADDED
Binary file (2.01 kB).
 
abstractions/__pycache__/ConvexHull.cpython-38.pyc ADDED
Binary file (6.95 kB).
 
abstractions/__pycache__/LCBox1.cpython-38.pyc ADDED
Binary file (2.35 kB).
 
abstractions/__pycache__/LCBox2.cpython-38.pyc ADDED
Binary file (1.58 kB).
 
abstractions/__pycache__/Options.cpython-38.pyc ADDED
Binary file (1.88 kB).
 
abstractions/__pycache__/PointCollection.cpython-38.pyc ADDED
Binary file (3.56 kB).
 
abstractions/__pycache__/Sphere.cpython-38.pyc ADDED
Binary file (1.7 kB).
 
abstractions/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (191 Bytes).
 
abstractions/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (165 Bytes).
 
app.py ADDED
@@ -0,0 +1,226 @@
+ # %%
+ try:
+     import detectron2
+ except ImportError:
+     import os
+     os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
+ import torch
+ from detectron2.utils.logger import setup_logger
+ setup_logger()
+
+ from detectron2.config import get_cfg
+ import detectron2.data.transforms as T
+ from detectron2.checkpoint import DetectionCheckpointer
+ from detectron2.modeling import build_model
+ from detectron2.utils.visualizer import Visualizer
+ from detectron2.data.detection_utils import read_image
+ from detectron2.data import MetadataCatalog
+
+ import numpy as np
+ import cv2
+ from PIL import Image
+ import random
+ import gradio as gr
+ import pandas as pd
+ import matplotlib.pyplot as plt
+ import io
+ from pickle import load
+
+ torch.manual_seed(0)
+ np.random.seed(0)
+ random.seed(0)
+
+ from base_cam import EigenCAM
+ from pytorch_grad_cam.utils.model_targets import FasterRCNNBoxScoreTarget
+
+ class Detectron2Monitor():
+     def __init__(self, label_list, label_dict, config_file, model_file):
+         self.label_list = label_list
+         self.cfg = self._setup_cfg(config_file, model_file)
+         self.model = build_model(self.cfg)
+         self.model.eval()
+         checkpointer = DetectionCheckpointer(self.model)
+         checkpointer.load(self.cfg.MODEL.WEIGHTS)
+         self.monitors_dict = self._load_monitor()
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.class_dict = label_dict
+
+     def _setup_cfg(self, config_file, model_file):
+         cfg = get_cfg()
+         cfg.merge_from_file(config_file)
+         cfg.MODEL.WEIGHTS = model_file
+         cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
+         if torch.cuda.is_available():
+             cfg.MODEL.DEVICE = "cuda"
+         else:
+             cfg.MODEL.DEVICE = "cpu"
+         cfg.freeze()
+         return cfg
+
+     def _get_input_dict(self, original_image):
+         height, width = original_image.shape[:2]
+         transform_gen = T.ResizeShortestEdge(
+             [self.cfg.INPUT.MIN_SIZE_TEST, self.cfg.INPUT.MIN_SIZE_TEST], self.cfg.INPUT.MAX_SIZE_TEST
+         )
+         image = transform_gen.get_transform(original_image).apply_image(original_image)
+         image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
+         inputs = {"image": image, "height": height, "width": width}
+         return inputs
+
+     def _postprocess_cam(self, raw_cam, img_width, img_height):
+         cam_orig = np.sum(raw_cam, axis=0)  # [H,W]
+         cam_orig = np.maximum(cam_orig, 0)  # ReLU
+         cam_orig -= np.min(cam_orig)
+         cam_orig /= np.max(cam_orig)
+         cam = cv2.resize(cam_orig, (img_width, img_height))
+         return cam
+
+     def _inference(self, inputs):
+         with torch.no_grad():
+             images = self.model.preprocess_image(inputs)
+             features = self.model.backbone(images.tensor)
+             proposals, _ = self.model.proposal_generator(images, features, None)  # RPN
+
+             features_ = [features[f] for f in self.model.roi_heads.box_in_features]
+             box_features = self.model.roi_heads.box_pooler(features_, [x.proposal_boxes for x in proposals])
+             box_features = self.model.roi_heads.box_head(box_features)  # features of all 1k candidates
+             predictions = self.model.roi_heads.box_predictor(box_features)
+             pred_instances, pred_inds = self.model.roi_heads.box_predictor.inference(predictions, proposals)
+             pred_instances = self.model.roi_heads.forward_with_given_boxes(features, pred_instances)
+
+             # output boxes, masks, scores, etc
+             pred_instances = self.model._postprocess(pred_instances, inputs, images.image_sizes)  # scale box to orig size
+             # features of the proposed boxes
+             feats = box_features[pred_inds]
+         return pred_instances, feats
+
+     def _load_monitor(self):
+         with open("monitors_dict.pkl", 'rb') as f:
+             monitors_dict = load(f)
+         return monitors_dict
+         # monitors_dict = {}
+         # for class_name in self.label_list:
+         #     if class_name == "train" or class_name == "OOD":
+         #         continue
+         #     monitor_path = f"monitors/{class_name}/monitor_for_clustering_parameter" + "_tau_" + str(tau) + ".pkl"
+         #     with open(monitor_path, 'rb') as f:
+         #         monitor = load(f)
+         #     monitors_dict[class_name] = monitor
+         # return monitors_dict
+
+     def _fasterrcnn_reshape_transform(self, x):
+         target_size = x['p6'].size()[-2:]
+         activations = []
+         for key, value in x.items():
+             activations.append(torch.nn.functional.interpolate(torch.abs(value), target_size, mode='bilinear'))
+         activations = torch.cat(activations, axis=1)
+         return activations
+
+     def get_output(self, img):
+         image = read_image(img, format="BGR")
+         input_image_dict = [self._get_input_dict(image)]
+
+         pred_instances, feats = self._inference(input_image_dict)
+         feats = feats.cpu().detach().numpy()
+         detections = pred_instances[0]["instances"].to("cpu")
+         cls_idxs = detections.pred_classes.detach().numpy()
+         # get labels from class indices
+         labels = [self.class_dict[i] for i in cls_idxs]
+         # count values in labels, and return a dictionary
+         labels_count_dict = dict((i, labels.count(i)) for i in labels)
+         v = Visualizer(image[..., ::-1], MetadataCatalog.get("bdd_dataset"), scale=1)
+         v = v.draw_instance_predictions(detections)
+         img_detection = v.get_image()
+         df = pd.DataFrame(list(labels_count_dict.items()), columns=['Object', 'Count'])
+         verdicts = []
+         for label, feat in zip(labels, feats):
+             verdict = self.monitors_dict[label].make_verdicts(feat[np.newaxis, :])[0] if label in self.monitors_dict else True
+             verdicts.append(verdict)
+         detections_ood = detections[[i for i, x in enumerate(verdicts) if not x]]
+         detections_ood.pred_classes = torch.tensor([10] * len(detections_ood.pred_classes))
+         v = Visualizer(image[..., ::-1], MetadataCatalog.get("bdd_dataset"), scale=1)
+         v = v.draw_instance_predictions(detections_ood)
+         img_ood = v.get_image()
+         pred_bboxes = detections.pred_boxes.tensor.numpy().astype(np.int32)
+         target_layers = [self.model.backbone]
+         targets = [FasterRCNNBoxScoreTarget(labels=labels, bounding_boxes=pred_bboxes)]
+         cam = EigenCAM(self.model,
+                        target_layers,
+                        use_cuda=False,
+                        reshape_transform=self._fasterrcnn_reshape_transform)
+         grayscale_cam = cam(input_image_dict, targets)
+         cam = self._postprocess_cam(grayscale_cam, input_image_dict[0]["width"], input_image_dict[0]["height"])
+         # plt.rcParams["figure.figsize"] = (30,10)
+         # plt.imshow(img_detection[..., ::-1], interpolation='none')
+         # plt.imshow(cam, cmap='jet', alpha=0.5)
+         # plt.axis("off")
+         # img_buff = io.BytesIO()
+         # plt.savefig(img_buff, format='png', bbox_inches='tight', pad_inches=0)
+         # img_cam = Image.open(img_buff)
+         img_cam = image
+         image_dict = {}
+         image_dict["image"] = image
+         image_dict["cam"] = img_cam
+         image_dict["detection"] = img_detection
+         image_dict["verdict"] = img_ood
+         return image_dict, df
+
+ config_file = "vanilla.yaml"
+ model_file = "model_final_vos_resnet_bdd.pth"
+ label_dict = {
+     0: 'pedestrian',
+     1: 'rider',
+     2: 'car',
+     3: 'truck',
+     4: 'bus',
+     5: 'train',
+     6: 'motor',
+     7: 'bike',
+     8: 'traffic light',
+     9: 'traffic sign',
+     10: 'OOD'
+ }
+ label_list = list(label_dict.values())
+ MetadataCatalog.get("bdd_dataset").set(thing_classes=label_list)
+ extractor = Detectron2Monitor(config_file=config_file, label_list=label_list, label_dict=label_dict, model_file=model_file)
+
+ # %%
+ def inference_gd(file):
+     image_dict, df = extractor.get_output(file)
+     return image_dict["detection"], df, image_dict["verdict"], image_dict["cam"]
+
+
+ examples = ["examples/0.jpg", "examples/1.jpg", "examples/2.jpg", "examples/3.jpg"]
+ with gr.Blocks(theme="gradio/monochrome") as demo:
+     gr.Markdown("# Runtime Monitoring Object Detection")
+     gr.Markdown(
+         """This interactive demo is based on box abstraction-based monitors for a Faster R-CNN model. The model is trained with the [Detectron2](https://github.com/facebookresearch/detectron2) library on the in-distribution dataset [Berkeley DeepDrive-100k](https://www.bdd100k.com/), which contains objects from the autonomous driving domain. The monitors are constructed by abstracting features extracted from the training data. The demo showcases the monitors' capacity to reject problematic detections caused by out-of-distribution (OOD) objects.
+
+ To use the demo, upload an image or select one of the provided examples, and click "Infer" to view the following results:
+
+ - **Detection**: outputs of the object detector
+ - **Detection summary**: a summary of the detection outputs
+ - **Verdict**: verdicts from the monitors
+ - **Explainable AI**: visual explanation generated by the [grad-cam](https://github.com/jacobgil/pytorch-grad-cam) library, which is based on the Class Activation Mapping (CAM) method.
+
+ In case the output image appears too small, simply right-click on it and choose “Open image in new tab” to view it in full size.
+ """
+     )
+     with gr.Row():
+         with gr.Column():
+             image = gr.Image(type="filepath", label="Input")
+             button = gr.Button("Infer")
+             examples_block = gr.Examples(examples, image)
+         with gr.Column():
+             with gr.Tab("Detection"):
+                 detection = gr.Image(label="Output")
+             with gr.Tab("Verdict"):
+                 verdict = gr.Image(label="Output")
+             with gr.Tab("Explainable AI"):
+                 cam = gr.Image(label="Output")
+             df = gr.Dataframe(label="Detection summary")
+     button.click(fn=inference_gd, inputs=image, outputs=[detection, df, verdict, cam])
+
+ demo.launch()
+
+
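
Beyond the Gradio UI, the Detectron2Monitor defined above can be used directly; a hedged offline sketch, assuming the cells above have been run as far as the extractor definition (the example path is one of the committed files, the output filenames are made up):

from PIL import Image

image_dict, df = extractor.get_output("examples/0.jpg")
print(df)                                                          # per-class detection counts
Image.fromarray(image_dict["detection"]).save("detection_0.png")  # all detections
Image.fromarray(image_dict["verdict"]).save("verdict_0.png")      # detections rejected by the monitors, relabelled as OOD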
base_cam.py ADDED
@@ -0,0 +1,223 @@
+ import numpy as np
+ import torch
+ import ttach as tta
+ from typing import Callable, List, Tuple
+ from pytorch_grad_cam.activations_and_gradients import ActivationsAndGradients
+ from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection
+ from pytorch_grad_cam.utils.image import scale_cam_image
+ from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
+
+ # https://arxiv.org/abs/2008.00299
+
+ class BaseCAM:
+     def __init__(self,
+                  model: torch.nn.Module,
+                  target_layers: List[torch.nn.Module],
+                  use_cuda: bool = False,
+                  reshape_transform: Callable = None,
+                  compute_input_gradient: bool = False,
+                  uses_gradients: bool = True) -> None:
+         self.model = model.eval()
+         self.target_layers = target_layers
+         self.cuda = use_cuda
+         if self.cuda:
+             self.model = model.cuda()
+         self.reshape_transform = reshape_transform
+         self.compute_input_gradient = compute_input_gradient
+         self.uses_gradients = uses_gradients
+         self.activations_and_grads = ActivationsAndGradients(
+             self.model, target_layers, reshape_transform)
+
+     """ Get a vector of weights for every channel in the target layer.
+         Methods that return weights channels,
+         will typically need to only implement this function. """
+
+     def get_cam_weights(self,
+                         input_tensor: torch.Tensor,
+                         target_layers: List[torch.nn.Module],
+                         targets: List[torch.nn.Module],
+                         activations: torch.Tensor,
+                         grads: torch.Tensor) -> np.ndarray:
+         raise NotImplementedError("get_cam_weights must be implemented by subclasses")
+
+     def get_cam_image(self,
+                       input_tensor: torch.Tensor,
+                       target_layer: torch.nn.Module,
+                       targets: List[torch.nn.Module],
+                       activations: torch.Tensor,
+                       grads: torch.Tensor,
+                       eigen_smooth: bool = False) -> np.ndarray:
+
+         weights = self.get_cam_weights(input_tensor,
+                                        target_layer,
+                                        targets,
+                                        activations,
+                                        grads)
+         weighted_activations = weights[:, :, None, None] * activations
+         if eigen_smooth:
+             cam = get_2d_projection(weighted_activations)
+         else:
+             cam = weighted_activations.sum(axis=1)
+         return cam
+
+     def forward(self,
+                 input_tensor: torch.Tensor,
+                 targets: List[torch.nn.Module],
+                 eigen_smooth: bool = False) -> np.ndarray:
+         if self.cuda:
+             input_tensor = input_tensor.cuda()
+
+         if self.compute_input_gradient:
+             input_tensor = torch.autograd.Variable(input_tensor,
+                                                    requires_grad=True)
+
+         outputs = self.activations_and_grads(input_tensor)
+         if targets is None:
+             target_categories = np.argmax(outputs.cpu().data.numpy(), axis=-1)
+             targets = [ClassifierOutputTarget(
+                 category) for category in target_categories]
+
+         if self.uses_gradients:
+             self.model.zero_grad()
+             loss = sum([target(output)
+                         for target, output in zip(targets, outputs)])
+             loss.backward(retain_graph=True)
+
+         # In most of the saliency attribution papers, the saliency is
+         # computed with a single target layer.
+         # Commonly it is the last convolutional layer.
+         # Here we support passing a list with multiple target layers.
+         # It will compute the saliency image for every image,
+         # and then aggregate them (with a default mean aggregation).
+         # This gives you more flexibility in case you just want to
+         # use all conv layers for example, all Batchnorm layers,
+         # or something else.
+         cam_per_layer = self.compute_cam_per_layer(input_tensor,
+                                                    targets,
+                                                    eigen_smooth)
+         return self.aggregate_multi_layers(cam_per_layer)
+
+     def get_target_width_height(self,
+                                 input_tensor: torch.Tensor) -> Tuple[int, int]:
+         width, height = input_tensor.size(-1), input_tensor.size(-2)
+         return width, height
+
+     def compute_cam_per_layer(
+             self,
+             input_tensor: torch.Tensor,
+             targets: List[torch.nn.Module],
+             eigen_smooth: bool) -> np.ndarray:
+         activations_list = [a.cpu().data.numpy()
+                             for a in self.activations_and_grads.activations]
+         grads_list = [g.cpu().data.numpy()
+                       for g in self.activations_and_grads.gradients]
+         target_size = self.get_target_width_height(input_tensor[0]["image"])
+
+         cam_per_target_layer = []
+         # Loop over the saliency image from every layer
+         for i in range(len(self.target_layers)):
+             target_layer = self.target_layers[i]
+             layer_activations = None
+             layer_grads = None
+             if i < len(activations_list):
+                 layer_activations = activations_list[i]
+             if i < len(grads_list):
+                 layer_grads = grads_list[i]
+
+             cam = self.get_cam_image(input_tensor,
+                                      target_layer,
+                                      targets,
+                                      layer_activations,
+                                      layer_grads,
+                                      eigen_smooth)
+             cam = np.maximum(cam, 0)
+             scaled = scale_cam_image(cam, target_size)
+             cam_per_target_layer.append(scaled[:, None, :])
+
+         return cam_per_target_layer
+
+     def aggregate_multi_layers(
+             self,
+             cam_per_target_layer: np.ndarray) -> np.ndarray:
+         cam_per_target_layer = np.concatenate(cam_per_target_layer, axis=1)
+         cam_per_target_layer = np.maximum(cam_per_target_layer, 0)
+         result = np.mean(cam_per_target_layer, axis=1)
+         return scale_cam_image(result)
+
+     def forward_augmentation_smoothing(self,
+                                        input_tensor: torch.Tensor,
+                                        targets: List[torch.nn.Module],
+                                        eigen_smooth: bool = False) -> np.ndarray:
+         transforms = tta.Compose(
+             [
+                 tta.HorizontalFlip(),
+                 tta.Multiply(factors=[0.9, 1, 1.1]),
+             ]
+         )
+         cams = []
+         for transform in transforms:
+             augmented_tensor = transform.augment_image(input_tensor)
+             cam = self.forward(augmented_tensor,
+                                targets,
+                                eigen_smooth)
+
+             # The ttach library expects a tensor of size BxCxHxW
+             cam = cam[:, None, :, :]
+             cam = torch.from_numpy(cam)
+             cam = transform.deaugment_mask(cam)
+
+             # Back to numpy float32, HxW
+             cam = cam.numpy()
+             cam = cam[:, 0, :, :]
+             cams.append(cam)
+
+         cam = np.mean(np.float32(cams), axis=0)
+         return cam
+
+     def __call__(self,
+                  input_tensor: torch.Tensor,
+                  targets: List[torch.nn.Module] = None,
+                  aug_smooth: bool = False,
+                  eigen_smooth: bool = False) -> np.ndarray:
+
+         # Smooth the CAM result with test time augmentation
+         if aug_smooth is True:
+             return self.forward_augmentation_smoothing(
+                 input_tensor, targets, eigen_smooth)
+
+         return self.forward(input_tensor,
+                             targets, eigen_smooth)
+
+     def __del__(self):
+         self.activations_and_grads.release()
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, exc_type, exc_value, exc_tb):
+         self.activations_and_grads.release()
+         if isinstance(exc_value, IndexError):
+             # Handle IndexError here...
+             print(
+                 f"An exception occurred in CAM with block: {exc_type}. Message: {exc_value}")
+             return True
+
+ class EigenCAM(BaseCAM):
+     def __init__(self, model, target_layers, use_cuda=False,
+                  reshape_transform=None):
+         super(EigenCAM, self).__init__(model,
+                                        target_layers,
+                                        use_cuda,
+                                        reshape_transform,
+                                        uses_gradients=False)
+
+     def get_cam_image(self,
+                       input_tensor,
+                       target_layer,
+                       target_category,
+                       activations,
+                       grads,
+                       eigen_smooth):
+         return get_2d_projection(activations)
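
For reference, app.py above instantiates this EigenCAM with the full Faster R-CNN model, the FPN backbone as the single target layer, and a reshape_transform that merges the pyramid levels; a condensed sketch of that call (the variable names here are placeholders):

# inputs: a list with one detectron2-style dict {"image": CHW tensor, "height": ..., "width": ...}
cam = EigenCAM(model,                       # the full GeneralizedRCNN model
               [model.backbone],            # FPN backbone as the target layer
               use_cuda=False,
               reshape_transform=fpn_reshape_transform)  # merges the p2..p6 feature maps into one tensor
grayscale_cam = cam(inputs, targets)        # targets: e.g. a FasterRCNNBoxScoreTarget list, as in app.py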
examples/0.jpg ADDED
examples/0.jpg:Zone.Identifier ADDED
@@ -0,0 +1,4 @@
+ [ZoneTransfer]
+ ZoneId=3
+ ReferrerUrl=https://www.file.io/
+ HostUrl=https://file.io/oAt6o2nEO4Dh
examples/1.jpg ADDED
examples/1.jpg:Zone.Identifier ADDED
@@ -0,0 +1,3 @@
+ [ZoneTransfer]
+ ZoneId=3
+ ReferrerUrl=C:\Users\adm-hew\Downloads\AVP\03_Scenario_onlycamsForUGA.zip
examples/2.jpg ADDED
examples/2.jpg:Zone.Identifier ADDED
@@ -0,0 +1,3 @@
+ [ZoneTransfer]
+ ZoneId=3
+ ReferrerUrl=C:\Users\adm-hew\Downloads\AVP\03_Scenario_onlycamsForUGA.zip
examples/3.jpg ADDED
examples/3.jpg:Zone.Identifier ADDED
@@ -0,0 +1,3 @@
+ [ZoneTransfer]
+ ZoneId=3
+ ReferrerUrl=C:\Users\adm-hew\Downloads\AVP\03_Scenario_onlycamsForUGA.zip
examples/4.jpg ADDED
examples/4.jpg:Zone.Identifier ADDED
@@ -0,0 +1,3 @@
+ [ZoneTransfer]
+ ZoneId=3
+ ReferrerUrl=C:\Users\adm-hew\Downloads\AVP\03_Scenario_onlycamsForUGA.zip
runtime_monitors/Monitor.py ADDED
@@ -0,0 +1,44 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+
+ from abstractions import *
+ import pickle
+ import numpy as np
+
+ class Monitor(object):
+
+     def __init__(self, good_ref=None):
+         # self.abs_type = abs_type
+         self.good_ref = good_ref
+
+     def set_reference(self, good_ref):
+         self.good_ref = good_ref
+
+     # def get_identity(self):
+     #     print("Monitor for network:" + self.netName + "class: " + str(self.classification) + "at layer " + str(self.location))
+
+     def make_verdicts(self, features):
+         if len(self.good_ref):
+             verdicts = ref_query(features, self.good_ref)
+         else:
+             raise RuntimeError("No reference exists!")
+         return verdicts
+
+ def ref_query(features, reference):
+     # check every feature vector against the reference abstractions (boxes)
+     query_results = [boxes_query(x, reference) for x in features]
+     return query_results
+
+
+ # def query_infusion(in_good_ref, in_bad_ref):
+ #     if len(in_good_ref) == len(in_bad_ref):  # 0: acceptance (true, false), 1: rejection (false, true or false), 2: uncertainty (true, true)
+ #         verdicts = np.zeros(len(in_good_ref), dtype=int)
+ #         for i in range(len(in_good_ref)):
+ #             if not in_good_ref[i]:
+ #                 verdicts[i] = 1
+ #             elif in_bad_ref[i]:
+ #                 verdicts[i] = 2
+ #         return verdicts
+ #     else:
+ #         print("Error: IllegalArgument")
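
A minimal sketch of how a Monitor is wired to the box abstractions (not part of the commit; the reference box and feature vectors are made up):

import numpy as np
from abstractions import Box
from runtime_monitors import Monitor

ref_box = Box()
ref_box.build(2, [(0, np.array([0.0, 0.0])), (1, np.array([1.0, 1.0]))])  # box over known-good features

monitor = Monitor(good_ref=[ref_box])
features = np.array([[0.5, 0.5],    # inside the reference box   -> accepted
                     [3.0, 3.0]])   # outside every reference box -> rejected
print(monitor.make_verdicts(features))   # [True, False]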
runtime_monitors/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .Monitor import *
+
runtime_monitors/__pycache__/Monitor.cpython-38.pyc ADDED
Binary file (1.22 kB).
 
runtime_monitors/__pycache__/Monitor.cpython-39.pyc ADDED
Binary file (1.19 kB).
 
runtime_monitors/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (199 Bytes).
 
runtime_monitors/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (173 Bytes).
 
vanilla.yaml ADDED
@@ -0,0 +1,28 @@
+ _BASE_: "Base-RCNN-FPN.yaml"
+ MODEL:
+   META_ARCHITECTURE: "GeneralizedRCNN"
+   WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+   # WEIGHTS: "./data/VOC-Detection/faster-rcnn/faster_rcnn_R_50_FPN_all_logistic/random_seed_0/model_final.pth"
+
+   # PROPOSAL_GENERATOR:
+   #   NAME: "RPNLogistic"
+   MASK_ON: False
+   RESNETS:
+     DEPTH: 50
+   ROI_HEADS:
+     NAME: "StandardROIHeads"
+     NUM_CLASSES: 10
+ INPUT:
+   MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800)
+   MIN_SIZE_TEST: 800
+ DATASETS:
+   TRAIN: ('bdd_custom_train',)
+   TEST: ('bdd_custom_val',)
+ SOLVER:
+   IMS_PER_BATCH: 16
+   BASE_LR: 0.02
+   STEPS: (60000, 80000)
+   MAX_ITER: 90000  # 17.4 epochs
+   WARMUP_ITERS: 100
+ DATALOADER:
+   NUM_WORKERS: 8  # Depends on the available memory
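
The training entry point itself is not part of this commit; a minimal sketch, assuming the 'bdd_custom_train' / 'bdd_custom_val' splits have already been registered with detectron2's DatasetCatalog and a GPU is available, would use the stock DefaultTrainer:

from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer

cfg = get_cfg()
cfg.merge_from_file("vanilla.yaml")   # _BASE_ pulls in Base-RCNN-FPN.yaml first
trainer = DefaultTrainer(cfg)         # requires the BDD100k datasets registered under the names in DATASETS
trainer.resume_or_load(resume=False)
trainer.train()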