Bounding box detection
PyTorch
Ontocord.AI committed on
Commit cae5ca5 · 1 Parent(s): bf40459

Create modeling_fcrnn.py

Files changed (1)
  1. modeling_fcrnn.py +1926 -0
modeling_fcrnn.py ADDED
@@ -0,0 +1,1926 @@
1
+ """
2
+ coding=utf-8
3
+ Copyright 2022, Ontocord, LLC
4
+ Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal
5
+ Adapted From Facebook Inc, Detectron2 && Huggingface Co.
6
+
7
+ Licensed under the Apache License, Version 2.0 (the "License");
8
+ you may not use this file except in compliance with the License.
9
+ You may obtain a copy of the License at
10
+
11
+ http://www.apache.org/licenses/LICENSE-2.0
12
+
13
+ Unless required by applicable law or agreed to in writing, software
14
+ distributed under the License is distributed on an "AS IS" BASIS,
15
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16
+ See the License for the specific language governing permissions and
17
+ limitations under the License.
18
+ """
19
+ import copy
+ import itertools
20
+ import math
21
+ import os
22
+ from abc import ABCMeta, abstractmethod
23
+ from collections import OrderedDict, namedtuple
24
+ from typing import Dict, List, Tuple
25
+
26
+ import numpy as np
27
+ import torch
28
+ from torch import nn
29
+ from torch.nn.modules.batchnorm import BatchNorm2d
30
+ from torchvision.ops import RoIPool
31
+ from torchvision.ops.boxes import batched_nms, nms
32
+
33
+ from .utils import WEIGHTS_NAME, Config, cached_path, hf_bucket_url, is_remote_url, load_checkpoint
34
+
35
+
36
+ # other:
37
+ def norm_box(boxes, raw_sizes):
38
+ if not isinstance(boxes, torch.Tensor):
39
+ normalized_boxes = boxes.copy()
40
+ else:
41
+ normalized_boxes = boxes.clone()
42
+ normalized_boxes[:, :, (0, 2)] /= raw_sizes[:, 1].view(-1, 1, 1)
43
+ normalized_boxes[:, :, (1, 3)] /= raw_sizes[:, 0].view(-1, 1, 1)
44
+ return normalized_boxes
45
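+
+ # Usage sketch (illustrative only; the toy shapes and values below are assumed,
+ # not taken from the original callers): norm_box rescales (x1, y1, x2, y2) boxes
+ # into [0, 1] coordinates using per-image raw (height, width) sizes.
+ def _norm_box_example():
+     boxes = torch.tensor([[[30.0, 40.0, 330.0, 240.0]]])  # (N=1 image, R=1 box, 4)
+     raw_sizes = torch.tensor([[400.0, 600.0]])  # (N, 2) as (height, width)
+     # x coordinates are divided by the width (600), y coordinates by the height (400)
+     return norm_box(boxes, raw_sizes)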
+
46
+
47
+ def pad_list_tensors(
48
+ list_tensors,
49
+ preds_per_image,
50
+ max_detections=None,
51
+ return_tensors=None,
52
+ padding=None,
53
+ pad_value=0,
54
+ location=None,
55
+ ):
56
+ """
57
+ location will always be cpu for np tensors
58
+ """
59
+ if location is None:
60
+ location = "cpu"
61
+ assert return_tensors in {"pt", "np", None}
62
+ assert padding in {"max_detections", "max_batch", None}
63
+ new = []
64
+ if padding is None:
65
+ if return_tensors is None:
66
+ return list_tensors
67
+ elif return_tensors == "pt":
68
+ if not isinstance(list_tensors, torch.Tensor):
69
+ return torch.stack(list_tensors).to(location)
70
+ else:
71
+ return list_tensors.to(location)
72
+ else:
73
+ if not isinstance(list_tensors, list):
74
+ return np.array(list_tensors.to(location))
75
+ else:
76
+ return list_tensors.to(location)
77
+ if padding == "max_detections":
78
+ assert max_detections is not None, "specify max number of detections per batch"
79
+ elif padding == "max_batch":
80
+ max_detections = max(preds_per_image)
81
+ for i in range(len(list_tensors)):
82
+ too_small = False
83
+ tensor_i = list_tensors.pop(0)
84
+ if tensor_i.ndim < 2:
85
+ too_small = True
86
+ tensor_i = tensor_i.unsqueeze(-1)
87
+ assert isinstance(tensor_i, torch.Tensor)
88
+ tensor_i = nn.functional.pad(
89
+ input=tensor_i,
90
+ pad=(0, 0, 0, max_detections - preds_per_image[i]),
91
+ mode="constant",
92
+ value=pad_value,
93
+ )
94
+ if too_small:
95
+ tensor_i = tensor_i.squeeze(-1)
96
+ if return_tensors is None:
97
+ if location == "cpu":
98
+ tensor_i = tensor_i.cpu()
99
+ tensor_i = tensor_i.tolist()
100
+ elif return_tensors == "np":
101
+ if location == "cpu":
102
+ tensor_i = tensor_i.cpu()
103
+ tensor_i = tensor_i.numpy()
104
+ else:
105
+ if location == "cpu":
106
+ tensor_i = tensor_i.cpu()
107
+ new.append(tensor_i)
108
+ if return_tensors == "np":
109
+ return np.stack(new, axis=0)
110
+ elif return_tensors == "pt" and not isinstance(new, torch.Tensor):
111
+ return torch.stack(new, dim=0)
112
+ else:
113
+ return new
114
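+
+ # Usage sketch (toy shapes are assumptions): pad ragged per-image detection
+ # tensors to the largest count in the batch and stack them into one tensor.
+ def _pad_list_tensors_example():
+     per_image = [torch.ones(2, 4), torch.ones(5, 4)]  # 2 and 5 detections
+     padded = pad_list_tensors(
+         per_image,
+         preds_per_image=[2, 5],
+         return_tensors="pt",
+         padding="max_batch",
+     )
+     return padded.shape  # torch.Size([2, 5, 4]); missing rows are zero padded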
+
115
+
116
+ def do_nms(boxes, scores, image_shape, score_thresh, nms_thresh, mind, maxd):
117
+ scores = scores[:, :-1]
118
+ num_bbox_reg_classes = boxes.shape[1] // 4
119
+ # Convert to Boxes to use the `clip` function ...
120
+ boxes = boxes.reshape(-1, 4)
121
+ _clip_box(boxes, image_shape)
122
+ boxes = boxes.view(-1, num_bbox_reg_classes, 4) # R x C x 4
123
+
124
+ # Select max scores
125
+ max_scores, max_classes = scores.max(1) # R x C --> R
126
+ num_objs = boxes.size(0)
127
+ boxes = boxes.view(-1, 4)
128
+ idxs = torch.arange(num_objs).to(boxes.device) * num_bbox_reg_classes + max_classes
129
+ max_boxes = boxes[idxs] # Select max boxes according to the max scores.
130
+
131
+ # Apply NMS
132
+ keep = nms(max_boxes, max_scores, nms_thresh)
133
+ keep = keep[:maxd]
134
+ if keep.shape[-1] >= mind and keep.shape[-1] <= maxd:
135
+ max_boxes, max_scores = max_boxes[keep], max_scores[keep]
136
+ classes = max_classes[keep]
137
+ return max_boxes, max_scores, classes, keep
138
+ else:
139
+ return None
140
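+
+ # Usage sketch (toy boxes and scores are assumptions): single-class NMS where
+ # the last score column is treated as background and dropped.
+ def _do_nms_example():
+     boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [0.0, 0.0, 9.0, 9.0]])  # (R=2, 4)
+     scores = torch.tensor([[0.9, 0.1], [0.8, 0.2]])  # (R, classes + background)
+     # the two boxes overlap heavily, so NMS keeps only the higher-scoring one
+     return do_nms(boxes, scores, (20, 20), score_thresh=0.5, nms_thresh=0.5, mind=1, maxd=10)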
+
141
+
142
+ # Helper Functions
143
+ def _clip_box(tensor, box_size: Tuple[int, int]):
144
+ assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
145
+ h, w = box_size
146
+ tensor[:, 0].clamp_(min=0, max=w)
147
+ tensor[:, 1].clamp_(min=0, max=h)
148
+ tensor[:, 2].clamp_(min=0, max=w)
149
+ tensor[:, 3].clamp_(min=0, max=h)
150
+
151
+
152
+ def _nonempty_boxes(box, threshold: float = 0.0) -> torch.Tensor:
153
+ widths = box[:, 2] - box[:, 0]
154
+ heights = box[:, 3] - box[:, 1]
155
+ keep = (widths > threshold) & (heights > threshold)
156
+ return keep
157
+
158
+
159
+ def get_norm(norm, out_channels):
160
+ if isinstance(norm, str):
161
+ if len(norm) == 0:
162
+ return None
163
+ norm = {
164
+ "BN": BatchNorm2d,
165
+ "GN": lambda channels: nn.GroupNorm(32, channels),
166
+ "nnSyncBN": nn.SyncBatchNorm, # keep for debugging
167
+ "": lambda x: x,
168
+ }[norm]
169
+ return norm(out_channels)
170
+
171
+
172
+ def _create_grid_offsets(size: List[int], stride: int, offset: float, device):
173
+
174
+ grid_height, grid_width = size
175
+ shifts_x = torch.arange(
176
+ offset * stride,
177
+ grid_width * stride,
178
+ step=stride,
179
+ dtype=torch.float32,
180
+ device=device,
181
+ )
182
+ shifts_y = torch.arange(
183
+ offset * stride,
184
+ grid_height * stride,
185
+ step=stride,
186
+ dtype=torch.float32,
187
+ device=device,
188
+ )
189
+
190
+ shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
191
+ shift_x = shift_x.reshape(-1)
192
+ shift_y = shift_y.reshape(-1)
193
+ return shift_x, shift_y
194
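+
+ # Usage sketch (the toy grid size is an assumption): the offsets are the (x, y)
+ # centres of every cell of a feature-map grid, expressed in input-image pixels.
+ def _grid_offsets_example():
+     shift_x, shift_y = _create_grid_offsets([2, 3], stride=16, offset=0.5, device="cpu")
+     return shift_x, shift_y  # 2 * 3 = 6 offsets each, spaced 16 px apart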
+
195
+
196
+ def build_backbone(cfg):
197
+ input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))
198
+ norm = cfg.RESNETS.NORM
199
+ stem = BasicStem(
200
+ in_channels=input_shape.channels,
201
+ out_channels=cfg.RESNETS.STEM_OUT_CHANNELS,
202
+ norm=norm,
203
+ caffe_maxpool=cfg.MODEL.MAX_POOL,
204
+ )
205
+ freeze_at = cfg.BACKBONE.FREEZE_AT
206
+
207
+ if freeze_at >= 1:
208
+ for p in stem.parameters():
209
+ p.requires_grad = False
210
+
211
+ out_features = cfg.RESNETS.OUT_FEATURES
212
+ depth = cfg.RESNETS.DEPTH
213
+ num_groups = cfg.RESNETS.NUM_GROUPS
214
+ width_per_group = cfg.RESNETS.WIDTH_PER_GROUP
215
+ bottleneck_channels = num_groups * width_per_group
216
+ in_channels = cfg.RESNETS.STEM_OUT_CHANNELS
217
+ out_channels = cfg.RESNETS.RES2_OUT_CHANNELS
218
+ stride_in_1x1 = cfg.RESNETS.STRIDE_IN_1X1
219
+ res5_dilation = cfg.RESNETS.RES5_DILATION
220
+ assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)
221
+
222
+ num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]
223
+
224
+ stages = []
225
+ out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features]
226
+ max_stage_idx = max(out_stage_idx)
227
+ for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
228
+ dilation = res5_dilation if stage_idx == 5 else 1
229
+ first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
230
+ stage_kargs = {
231
+ "num_blocks": num_blocks_per_stage[idx],
232
+ "first_stride": first_stride,
233
+ "in_channels": in_channels,
234
+ "bottleneck_channels": bottleneck_channels,
235
+ "out_channels": out_channels,
236
+ "num_groups": num_groups,
237
+ "norm": norm,
238
+ "stride_in_1x1": stride_in_1x1,
239
+ "dilation": dilation,
240
+ }
241
+
242
+ stage_kargs["block_class"] = BottleneckBlock
243
+ blocks = ResNet.make_stage(**stage_kargs)
244
+ in_channels = out_channels
245
+ out_channels *= 2
246
+ bottleneck_channels *= 2
247
+
248
+ if freeze_at >= stage_idx:
249
+ for block in blocks:
250
+ block.freeze()
251
+ stages.append(blocks)
252
+
253
+ return ResNet(stem, stages, out_features=out_features)
254
+
255
+
256
+ def find_top_rpn_proposals(
257
+ proposals,
258
+ pred_objectness_logits,
259
+ images,
260
+ image_sizes,
261
+ nms_thresh,
262
+ pre_nms_topk,
263
+ post_nms_topk,
264
+ min_box_side_len,
265
+ training,
266
+ ):
267
+ """Args:
268
+ proposals (list[Tensor]): (L, N, Hi*Wi*A, 4).
269
+ pred_objectness_logits: tensors of length L.
270
+ nms_thresh (float): IoU threshold to use for NMS
271
+ pre_nms_topk (int): before nms
272
+ post_nms_topk (int): after nms
273
+ min_box_side_len (float): minimum proposal box side
274
+ training (bool): True if proposals are to be used in training.
275
+ Returns:
276
+ results (List[Dict]): stores post_nms_topk object proposals for image i.
277
+ """
278
+ num_images = len(images)
279
+ device = proposals[0].device
280
+
281
+ # 1. Select top-k anchor for every level and every image
282
+ topk_scores = [] # #lvl Tensor, each of shape N x topk
283
+ topk_proposals = []
284
+ level_ids = [] # #lvl Tensor, each of shape (topk,)
285
+ batch_idx = torch.arange(num_images, device=device)
286
+ for level_id, proposals_i, logits_i in zip(itertools.count(), proposals, pred_objectness_logits):
287
+ Hi_Wi_A = logits_i.shape[1]
288
+ num_proposals_i = min(pre_nms_topk, Hi_Wi_A)
289
+
290
+ # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812)
291
+ # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)
292
+ logits_i, idx = logits_i.sort(descending=True, dim=1)
293
+ topk_scores_i = logits_i[batch_idx, :num_proposals_i]
294
+ topk_idx = idx[batch_idx, :num_proposals_i]
295
+
296
+ # each is N x topk
297
+ topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4
298
+
299
+ topk_proposals.append(topk_proposals_i)
300
+ topk_scores.append(topk_scores_i)
301
+ level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device))
302
+
303
+ # 2. Concat all levels together
304
+ topk_scores = torch.cat(topk_scores, dim=1)
305
+ topk_proposals = torch.cat(topk_proposals, dim=1)
306
+ level_ids = torch.cat(level_ids, dim=0)
307
+
308
+ # if I change to batched_nms, I wonder if this will make a difference
309
+ # 3. For each image, run a per-level NMS, and choose topk results.
310
+ results = []
311
+ for n, image_size in enumerate(image_sizes):
312
+ boxes = topk_proposals[n]
313
+ scores_per_img = topk_scores[n]
314
+ # I will have to take a look at the boxes clip method
315
+ _clip_box(boxes, image_size)
316
+ # filter empty boxes
317
+ keep = _nonempty_boxes(boxes, threshold=min_box_side_len)
318
+ lvl = level_ids
319
+ if keep.sum().item() != len(boxes):
320
+ boxes, scores_per_img, lvl = (
321
+ boxes[keep],
322
+ scores_per_img[keep],
323
+ level_ids[keep],
324
+ )
325
+
326
+ keep = batched_nms(boxes, scores_per_img, lvl, nms_thresh)
327
+ keep = keep[:post_nms_topk]
328
+
329
+ res = (boxes[keep], scores_per_img[keep])
330
+ results.append(res)
331
+
332
+ # I wonder if it would be possible for me to pad all these things.
333
+ return results
334
+
335
+
336
+ def subsample_labels(labels, num_samples, positive_fraction, bg_label):
337
+ """
338
+ Returns:
339
+ pos_idx, neg_idx (Tensor):
340
+ 1D vector of indices. The total length of both is `num_samples` or fewer.
341
+ """
342
+ positive = torch.nonzero((labels != -1) & (labels != bg_label)).squeeze(1)
343
+ negative = torch.nonzero(labels == bg_label).squeeze(1)
344
+
345
+ num_pos = int(num_samples * positive_fraction)
346
+ # protect against not enough positive examples
347
+ num_pos = min(positive.numel(), num_pos)
348
+ num_neg = num_samples - num_pos
349
+ # protect against not enough negative examples
350
+ num_neg = min(negative.numel(), num_neg)
351
+
352
+ # randomly select positive and negative examples
353
+ perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
354
+ perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]
355
+
356
+ pos_idx = positive[perm1]
357
+ neg_idx = negative[perm2]
358
+ return pos_idx, neg_idx
359
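+
+ # Usage sketch (the toy label vector is an assumption): sample a fixed-size
+ # minibatch of foreground/background indices with a target positive fraction.
+ def _subsample_labels_example():
+     labels = torch.tensor([1, 0, 0, -1, 1, 0])  # -1 = ignore, 0 = background here
+     pos_idx, neg_idx = subsample_labels(labels, num_samples=4, positive_fraction=0.5, bg_label=0)
+     return pos_idx, neg_idx  # at most 2 positive and 2 negative indices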
+
360
+
361
+ def add_ground_truth_to_proposals(gt_boxes, proposals):
362
+ raise NotImplementedError()
363
+
364
+
365
+ def add_ground_truth_to_proposals_single_image(gt_boxes, proposals):
366
+ raise NotImplementedError()
367
+
368
+
369
+ def _fmt_box_list(box_tensor, batch_index: int):
370
+ repeated_index = torch.full(
371
+ (len(box_tensor), 1),
372
+ batch_index,
373
+ dtype=box_tensor.dtype,
374
+ device=box_tensor.device,
375
+ )
376
+ return torch.cat((repeated_index, box_tensor), dim=1)
377
+
378
+
379
+ def convert_boxes_to_pooler_format(box_lists: List[torch.Tensor]):
380
+ pooler_fmt_boxes = torch.cat(
381
+ [_fmt_box_list(box_list, i) for i, box_list in enumerate(box_lists)],
382
+ dim=0,
383
+ )
384
+ return pooler_fmt_boxes
385
+
386
+
387
+ def assign_boxes_to_levels(
388
+ box_lists: List[torch.Tensor],
389
+ min_level: int,
390
+ max_level: int,
391
+ canonical_box_size: int,
392
+ canonical_level: int,
393
+ ):
394
+
395
+ box_sizes = torch.sqrt(torch.cat([boxes.area() for boxes in box_lists]))
396
+ # Eqn.(1) in FPN paper
397
+ level_assignments = torch.floor(canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8))
398
+ # clamp level to (min, max), in case the box size is too large or too small
399
+ # for the available feature maps
400
+ level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level)
401
+ return level_assignments.to(torch.int64) - min_level
402
+
403
+
404
+ # Helper Classes
405
+ class _NewEmptyTensorOp(torch.autograd.Function):
406
+ @staticmethod
407
+ def forward(ctx, x, new_shape):
408
+ ctx.shape = x.shape
409
+ return x.new_empty(new_shape)
410
+
411
+ @staticmethod
412
+ def backward(ctx, grad):
413
+ shape = ctx.shape
414
+ return _NewEmptyTensorOp.apply(grad, shape), None
415
+
416
+
417
+ class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])):
418
+ def __new__(cls, *, channels=None, height=None, width=None, stride=None):
419
+ return super().__new__(cls, channels, height, width, stride)
420
+
421
+
422
+ class Box2BoxTransform(object):
423
+ """
424
+ This R-CNN transformation scales the box's width and height
425
+ by exp(dw), exp(dh) and shifts a box's center by the offset
426
+ (dx * width, dy * height).
427
+ """
428
+
429
+ def __init__(self, weights: Tuple[float, float, float, float], scale_clamp: float = None):
430
+ """
431
+ Args:
432
+ weights (4-element tuple): Scaling factors that are applied to the
433
+ (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set
434
+ such that the deltas have unit variance; now they are treated as
435
+ hyperparameters of the system.
436
+ scale_clamp (float): When predicting deltas, the predicted box scaling
437
+ factors (dw and dh) are clamped such that they are <= scale_clamp.
438
+ """
439
+ self.weights = weights
440
+ if scale_clamp is not None:
441
+ self.scale_clamp = scale_clamp
442
+ else:
443
+ """
444
+ Value for clamping large dw and dh predictions.
445
+ The heuristic is that we clamp such that dw and dh are no larger
446
+ than what would transform a 16px box into a 1000px box
447
+ (based on a small anchor, 16px, and a typical image size, 1000px).
448
+ """
449
+ self.scale_clamp = math.log(1000.0 / 16)
450
+
451
+ def get_deltas(self, src_boxes, target_boxes):
452
+ """
453
+ Get box regression transformation deltas (dx, dy, dw, dh) that can be used
454
+ to transform the `src_boxes` into the `target_boxes`. That is, the relation
455
+ ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless
456
+ any delta is too large and is clamped).
457
+ Args:
458
+ src_boxes (Tensor): source boxes, e.g., object proposals
459
+ target_boxes (Tensor): target of the transformation, e.g., ground-truth
460
+ boxes.
461
+ """
462
+ assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
463
+ assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
464
+
465
+ src_widths = src_boxes[:, 2] - src_boxes[:, 0]
466
+ src_heights = src_boxes[:, 3] - src_boxes[:, 1]
467
+ src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths
468
+ src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights
469
+
470
+ target_widths = target_boxes[:, 2] - target_boxes[:, 0]
471
+ target_heights = target_boxes[:, 3] - target_boxes[:, 1]
472
+ target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths
473
+ target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights
474
+
475
+ wx, wy, ww, wh = self.weights
476
+ dx = wx * (target_ctr_x - src_ctr_x) / src_widths
477
+ dy = wy * (target_ctr_y - src_ctr_y) / src_heights
478
+ dw = ww * torch.log(target_widths / src_widths)
479
+ dh = wh * torch.log(target_heights / src_heights)
480
+
481
+ deltas = torch.stack((dx, dy, dw, dh), dim=1)
482
+ assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!"
483
+ return deltas
484
+
485
+ def apply_deltas(self, deltas, boxes):
486
+ """
487
+ Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.
488
+ Args:
489
+ deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
490
+ deltas[i] represents k potentially different class-specific
491
+ box transformations for the single box boxes[i].
492
+ boxes (Tensor): boxes to transform, of shape (N, 4)
493
+ """
494
+ boxes = boxes.to(deltas.dtype)
495
+
496
+ widths = boxes[:, 2] - boxes[:, 0]
497
+ heights = boxes[:, 3] - boxes[:, 1]
498
+ ctr_x = boxes[:, 0] + 0.5 * widths
499
+ ctr_y = boxes[:, 1] + 0.5 * heights
500
+
501
+ wx, wy, ww, wh = self.weights
502
+ dx = deltas[:, 0::4] / wx
503
+ dy = deltas[:, 1::4] / wy
504
+ dw = deltas[:, 2::4] / ww
505
+ dh = deltas[:, 3::4] / wh
506
+
507
+ # Prevent sending too large values into torch.exp()
508
+ dw = torch.clamp(dw, max=self.scale_clamp)
509
+ dh = torch.clamp(dh, max=self.scale_clamp)
510
+
511
+ pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
512
+ pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
513
+ pred_w = torch.exp(dw) * widths[:, None]
514
+ pred_h = torch.exp(dh) * heights[:, None]
515
+
516
+ pred_boxes = torch.zeros_like(deltas)
517
+ pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1
518
+ pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1
519
+ pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2
520
+ pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2
521
+ return pred_boxes
522
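+
+ # Usage sketch (the weights and boxes are assumed toy values): deltas computed
+ # by get_deltas round-trip back to the target box through apply_deltas.
+ def _box2box_transform_example():
+     transform = Box2BoxTransform(weights=(10.0, 10.0, 5.0, 5.0))
+     src = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
+     target = torch.tensor([[2.0, 2.0, 14.0, 14.0]])
+     deltas = transform.get_deltas(src, target)
+     return transform.apply_deltas(deltas, src)  # approximately equal to `target`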
+
523
+
524
+ class Matcher(object):
525
+ """
526
+ This class assigns to each predicted "element" (e.g., a box) a ground-truth
527
+ element. Each predicted element will have exactly zero or one matches; each
528
+ ground-truth element may be matched to zero or more predicted elements.
529
+ The matching is determined by the MxN match_quality_matrix, that characterizes
530
+ how well each (ground-truth, prediction)-pair match each other. For example,
531
+ if the elements are boxes, this matrix may contain box intersection-over-union
532
+ overlap values.
533
+ The matcher returns (a) a vector of length N containing the index of the
534
+ ground-truth element m in [0, M) that matches to prediction n in [0, N).
535
+ (b) a vector of length N containing the labels for each prediction.
536
+ """
537
+
538
+ def __init__(
539
+ self,
540
+ thresholds: List[float],
541
+ labels: List[int],
542
+ allow_low_quality_matches: bool = False,
543
+ ):
544
+ """
545
+ Args:
546
+ thresholds (list): a list of thresholds used to stratify predictions
547
+ into levels.
548
+ labels (list): a list of values to label predictions belonging at
549
+ each level. A label can be one of {-1, 0, 1} signifying
550
+ {ignore, negative class, positive class}, respectively.
551
+ allow_low_quality_matches (bool): if True, produce additional matches or predictions with maximum match quality lower than high_threshold.
552
+ For example, thresholds = [0.3, 0.5] labels = [0, -1, 1] All predictions with iou < 0.3 will be marked with 0 and
553
+ thus will be considered as false positives while training. All predictions with 0.3 <= iou < 0.5 will be marked with -1 and
554
+ thus will be ignored. All predictions with 0.5 <= iou will be marked with 1 and thus will be considered as true positives.
555
+ """
556
+ thresholds = thresholds[:]
557
+ assert thresholds[0] > 0
558
+ thresholds.insert(0, -float("inf"))
559
+ thresholds.append(float("inf"))
560
+ assert all([low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])])
561
+ assert all([label_i in [-1, 0, 1] for label_i in labels])
562
+ assert len(labels) == len(thresholds) - 1
563
+ self.thresholds = thresholds
564
+ self.labels = labels
565
+ self.allow_low_quality_matches = allow_low_quality_matches
566
+
567
+ def __call__(self, match_quality_matrix):
568
+ """
569
+ Args:
570
+ match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted
571
+ elements. All elements must be >= 0 (due to the use of `torch.nonzero` for selecting indices in :meth:`set_low_quality_matches_`).
572
+ Returns:
573
+ matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M)
574
+ match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates true or false positive or ignored
575
+ """
576
+ assert match_quality_matrix.dim() == 2
577
+ if match_quality_matrix.numel() == 0:
578
+ default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64)
579
+ # When no gt boxes exist, we define IOU = 0 and therefore set labels
580
+ # to `self.labels[0]`, which usually defaults to background class 0
581
+ # To choose to ignore instead,
582
+ # can make labels=[-1,0,-1,1] + set appropriate thresholds
583
+ default_match_labels = match_quality_matrix.new_full(
584
+ (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8
585
+ )
586
+ return default_matches, default_match_labels
587
+
588
+ assert torch.all(match_quality_matrix >= 0)
589
+
590
+ # match_quality_matrix is M (gt) x N (predicted)
591
+ # Max over gt elements (dim 0) to find best gt candidate for each prediction
592
+ matched_vals, matches = match_quality_matrix.max(dim=0)
593
+
594
+ match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)
595
+
596
+ for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):
597
+ low_high = (matched_vals >= low) & (matched_vals < high)
598
+ match_labels[low_high] = l
599
+
600
+ if self.allow_low_quality_matches:
601
+ self.set_low_quality_matches_(match_labels, match_quality_matrix)
602
+
603
+ return matches, match_labels
604
+
605
+ def set_low_quality_matches_(self, match_labels, match_quality_matrix):
606
+ """
607
+ Produce additional matches for predictions that have only low-quality matches.
608
+ Specifically, for each ground-truth G find the set of predictions that have
609
+ maximum overlap with it (including ties); for each prediction in that set, if
610
+ it is unmatched, then match it to the ground-truth G.
611
+ This function implements the RPN assignment case (i)
612
+ in Sec. 3.1.2 of Faster R-CNN.
613
+ """
614
+ # For each gt, find the prediction with which it has highest quality
615
+ highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
616
+ # Find the highest quality match available, even if it is low, including ties.
617
+ # Note that the matches qualities must be positive due to the use of
618
+ # `torch.nonzero`.
619
+ of_quality_inds = match_quality_matrix == highest_quality_foreach_gt[:, None]
620
+ if of_quality_inds.dim() == 0:
621
+ (_, pred_inds_with_highest_quality) = of_quality_inds.unsqueeze(0).nonzero().unbind(1)
622
+ else:
623
+ (_, pred_inds_with_highest_quality) = of_quality_inds.nonzero().unbind(1)
624
+ match_labels[pred_inds_with_highest_quality] = 1
625
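+
+ # Usage sketch (the IoU matrix is a toy assumption): rows are ground-truth
+ # boxes, columns are predictions; thresholds and labels follow the docstring above.
+ def _matcher_example():
+     matcher = Matcher(thresholds=[0.3, 0.5], labels=[0, -1, 1], allow_low_quality_matches=False)
+     iou = torch.tensor([[0.9, 0.2, 0.4], [0.1, 0.6, 0.45]])
+     matches, match_labels = matcher(iou)
+     return matches, match_labels  # labels: 1 = positive, 0 = negative, -1 = ignored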
+
626
+
627
+ class RPNOutputs(object):
628
+ def __init__(
629
+ self,
630
+ box2box_transform,
631
+ anchor_matcher,
632
+ batch_size_per_image,
633
+ positive_fraction,
634
+ images,
635
+ pred_objectness_logits,
636
+ pred_anchor_deltas,
637
+ anchors,
638
+ boundary_threshold=0,
639
+ gt_boxes=None,
640
+ smooth_l1_beta=0.0,
641
+ ):
642
+ """
643
+ Args:
644
+ box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform` instance for anchor-proposal transformations.
645
+ anchor_matcher (Matcher): :class:`Matcher` instance for matching anchors to ground-truth boxes; used to determine training labels.
646
+ batch_size_per_image (int): number of proposals to sample when training
647
+ positive_fraction (float): target fraction of sampled proposals that should be positive
648
+ images (ImageList): :class:`ImageList` instance representing N input images
649
+ pred_objectness_logits (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, A, Hi, Wi)
650
+ pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, A*4, Hi, Wi)
651
+ anchors (list[torch.Tensor]): nested list of boxes. anchors[i][j] at (n, l) stores anchor array for feature map l
652
+ boundary_threshold (int): if >= 0, then anchors that extend beyond the image boundary by more than boundary_thresh are not used in training.
653
+ gt_boxes (list[Boxes], optional): A list of N elements.
654
+ smooth_l1_beta (float): The transition point between L1 and L2 loss. When set to 0, the loss becomes L1. When set to +inf, it is ignored
655
+ """
656
+ self.box2box_transform = box2box_transform
657
+ self.anchor_matcher = anchor_matcher
658
+ self.batch_size_per_image = batch_size_per_image
659
+ self.positive_fraction = positive_fraction
660
+ self.pred_objectness_logits = pred_objectness_logits
661
+ self.pred_anchor_deltas = pred_anchor_deltas
662
+
663
+ self.anchors = anchors
664
+ self.gt_boxes = gt_boxes
665
+ self.num_feature_maps = len(pred_objectness_logits)
666
+ self.num_images = len(images)
667
+ self.boundary_threshold = boundary_threshold
668
+ self.smooth_l1_beta = smooth_l1_beta
669
+
670
+ def _get_ground_truth(self):
671
+ raise NotImplementedError()
672
+
673
+ def predict_proposals(self):
674
+ # pred_anchor_deltas: (L, N, ? Hi, Wi)
675
+ # anchors:(N, L, -1, B)
676
+ # here we loop over specific feature map, NOT images
677
+ proposals = []
678
+ anchors = self.anchors.transpose(0, 1)
679
+ for anchors_i, pred_anchor_deltas_i in zip(anchors, self.pred_anchor_deltas):
680
+ B = anchors_i.size(-1)
681
+ N, _, Hi, Wi = pred_anchor_deltas_i.shape
682
+ anchors_i = anchors_i.flatten(start_dim=0, end_dim=1)
683
+ pred_anchor_deltas_i = pred_anchor_deltas_i.view(N, -1, B, Hi, Wi).permute(0, 3, 4, 1, 2).reshape(-1, B)
684
+ proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i)
685
+ # Append feature map proposals with shape (N, Hi*Wi*A, B)
686
+ proposals.append(proposals_i.view(N, -1, B))
687
+ proposals = torch.stack(proposals)
688
+ return proposals
689
+
690
+ def predict_objectness_logits(self):
691
+ """
692
+ Returns:
693
+ pred_objectness_logits (list[Tensor]) -> (N, Hi*Wi*A).
694
+ """
695
+ pred_objectness_logits = [
696
+ # Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A)
697
+ score.permute(0, 2, 3, 1).reshape(self.num_images, -1)
698
+ for score in self.pred_objectness_logits
699
+ ]
700
+ return pred_objectness_logits
701
+
702
+
703
+ # Main Classes
704
+ class Conv2d(nn.Conv2d):
705
+ def __init__(self, *args, **kwargs):
706
+ norm = kwargs.pop("norm", None)
707
+ activation = kwargs.pop("activation", None)
708
+ super().__init__(*args, **kwargs)
709
+
710
+ self.norm = norm
711
+ self.activation = activation
712
+
713
+ def forward(self, x):
714
+ if x.numel() == 0 and self.training:
715
+ assert not isinstance(self.norm, nn.SyncBatchNorm)
716
+ if x.numel() == 0:
717
+ assert not isinstance(self.norm, nn.GroupNorm)
718
+ output_shape = [
719
+ (i + 2 * p - (di * (k - 1) + 1)) // s + 1
720
+ for i, p, di, k, s in zip(
721
+ x.shape[-2:],
722
+ self.padding,
723
+ self.dilation,
724
+ self.kernel_size,
725
+ self.stride,
726
+ )
727
+ ]
728
+ output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
729
+ empty = _NewEmptyTensorOp.apply(x, output_shape)
730
+ if self.training:
731
+ _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
732
+ return empty + _dummy
733
+ else:
734
+ return empty
735
+
736
+ x = super().forward(x)
737
+ if self.norm is not None:
738
+ x = self.norm(x)
739
+ if self.activation is not None:
740
+ x = self.activation(x)
741
+ return x
742
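+
+ # Usage sketch (the channel sizes are assumptions): this Conv2d wrapper can fuse
+ # an optional normalization layer and activation into the convolution call.
+ def _conv2d_example():
+     conv = Conv2d(3, 8, kernel_size=3, padding=1, bias=False,
+                   norm=get_norm("BN", 8), activation=nn.functional.relu_)
+     return conv(torch.randn(2, 3, 16, 16)).shape  # torch.Size([2, 8, 16, 16])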
+
743
+
744
+ class LastLevelMaxPool(nn.Module):
745
+ """
746
+ This module is used in the original FPN to generate a downsampled P6 feature from P5.
747
+ """
748
+
749
+ def __init__(self):
750
+ super().__init__()
751
+ self.num_levels = 1
752
+ self.in_feature = "p5"
753
+
754
+ def forward(self, x):
755
+ return [nn.functional.max_pool2d(x, kernel_size=1, stride=2, padding=0)]
756
+
757
+
758
+ class LastLevelP6P7(nn.Module):
759
+ """
760
+ This module is used in RetinaNet to generate extra layers, P6 and P7 from C5 feature.
761
+ """
762
+
763
+ def __init__(self, in_channels, out_channels):
764
+ super().__init__()
765
+ self.num_levels = 2
766
+ self.in_feature = "res5"
767
+ self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
768
+ self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
769
+
770
+ def forward(self, c5):
771
+ p6 = self.p6(c5)
772
+ p7 = self.p7(nn.functional.relu(p6))
773
+ return [p6, p7]
774
+
775
+
776
+ class BasicStem(nn.Module):
777
+ def __init__(self, in_channels=3, out_channels=64, norm="BN", caffe_maxpool=False):
778
+ super().__init__()
779
+ self.conv1 = Conv2d(
780
+ in_channels,
781
+ out_channels,
782
+ kernel_size=7,
783
+ stride=2,
784
+ padding=3,
785
+ bias=False,
786
+ norm=get_norm(norm, out_channels),
787
+ )
788
+ self.caffe_maxpool = caffe_maxpool
789
+ # use pad 1 instead of pad zero
790
+
791
+ def forward(self, x):
792
+ x = self.conv1(x)
793
+ x = nn.functional.relu_(x)
794
+ if self.caffe_maxpool:
795
+ x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=0, ceil_mode=True)
796
+ else:
797
+ x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=1)
798
+ return x
799
+
800
+ @property
801
+ def out_channels(self):
802
+ return self.conv1.out_channels
803
+
804
+ @property
805
+ def stride(self):
806
+ return 4 # = stride 2 conv -> stride 2 max pool
807
+
808
+
809
+ class ResNetBlockBase(nn.Module):
810
+ def __init__(self, in_channels, out_channels, stride):
811
+ super().__init__()
812
+ self.in_channels = in_channels
813
+ self.out_channels = out_channels
814
+ self.stride = stride
815
+
816
+ def freeze(self):
817
+ for p in self.parameters():
818
+ p.requires_grad = False
819
+ return self
820
+
821
+
822
+ class BottleneckBlock(ResNetBlockBase):
823
+ def __init__(
824
+ self,
825
+ in_channels,
826
+ out_channels,
827
+ bottleneck_channels,
828
+ stride=1,
829
+ num_groups=1,
830
+ norm="BN",
831
+ stride_in_1x1=False,
832
+ dilation=1,
833
+ ):
834
+ super().__init__(in_channels, out_channels, stride)
835
+
836
+ if in_channels != out_channels:
837
+ self.shortcut = Conv2d(
838
+ in_channels,
839
+ out_channels,
840
+ kernel_size=1,
841
+ stride=stride,
842
+ bias=False,
843
+ norm=get_norm(norm, out_channels),
844
+ )
845
+ else:
846
+ self.shortcut = None
847
+
848
+ # The original MSRA ResNet models have stride in the first 1x1 conv
849
+ # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
850
+ # stride in the 3x3 conv
851
+ stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
852
+
853
+ self.conv1 = Conv2d(
854
+ in_channels,
855
+ bottleneck_channels,
856
+ kernel_size=1,
857
+ stride=stride_1x1,
858
+ bias=False,
859
+ norm=get_norm(norm, bottleneck_channels),
860
+ )
861
+
862
+ self.conv2 = Conv2d(
863
+ bottleneck_channels,
864
+ bottleneck_channels,
865
+ kernel_size=3,
866
+ stride=stride_3x3,
867
+ padding=1 * dilation,
868
+ bias=False,
869
+ groups=num_groups,
870
+ dilation=dilation,
871
+ norm=get_norm(norm, bottleneck_channels),
872
+ )
873
+
874
+ self.conv3 = Conv2d(
875
+ bottleneck_channels,
876
+ out_channels,
877
+ kernel_size=1,
878
+ bias=False,
879
+ norm=get_norm(norm, out_channels),
880
+ )
881
+
882
+ def forward(self, x):
883
+ out = self.conv1(x)
884
+ out = nn.functional.relu_(out)
885
+
886
+ out = self.conv2(out)
887
+ out = nn.functional.relu_(out)
888
+
889
+ out = self.conv3(out)
890
+
891
+ if self.shortcut is not None:
892
+ shortcut = self.shortcut(x)
893
+ else:
894
+ shortcut = x
895
+
896
+ out += shortcut
897
+ out = nn.functional.relu_(out)
898
+ return out
899
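+
+ # Usage sketch (the channel sizes are assumptions): a stride-1 bottleneck block
+ # with a 1x1 projection shortcut because in_channels != out_channels.
+ def _bottleneck_block_example():
+     block = BottleneckBlock(in_channels=64, out_channels=256, bottleneck_channels=64, norm="BN")
+     return block(torch.randn(1, 64, 32, 32)).shape  # torch.Size([1, 256, 32, 32])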
+
900
+
901
+ class Backbone(nn.Module, metaclass=ABCMeta):
902
+ def __init__(self):
903
+ super().__init__()
904
+
905
+ @abstractmethod
906
+ def forward(self):
907
+ pass
908
+
909
+ @property
910
+ def size_divisibility(self):
911
+ """
912
+ Some backbones require the input height and width to be divisible by a specific integer. This is
913
+ typically true for encoder / decoder type networks with lateral connection (e.g., FPN) for which feature maps need to match
914
+ dimension in the "bottom up" and "top down" paths. Set to 0 if no specific input size divisibility is required.
915
+ """
916
+ return 0
917
+
918
+ def output_shape(self):
919
+ return {
920
+ name: ShapeSpec(
921
+ channels=self._out_feature_channels[name],
922
+ stride=self._out_feature_strides[name],
923
+ )
924
+ for name in self._out_features
925
+ }
926
+
927
+ @property
928
+ def out_features(self):
929
+ """deprecated"""
930
+ return self._out_features
931
+
932
+ @property
933
+ def out_feature_strides(self):
934
+ """deprecated"""
935
+ return {f: self._out_feature_strides[f] for f in self._out_features}
936
+
937
+ @property
938
+ def out_feature_channels(self):
939
+ """deprecated"""
940
+ return {f: self._out_feature_channels[f] for f in self._out_features}
941
+
942
+
943
+ class ResNet(Backbone):
944
+ def __init__(self, stem, stages, num_classes=None, out_features=None):
945
+ """
946
+ Args:
947
+ stem (nn.Module): a stem module
948
+ stages (list[list[ResNetBlock]]): several (typically 4) stages, each contains multiple :class:`ResNetBlockBase`.
949
+ num_classes (None or int): if None, will not perform classification.
950
+ out_features (list[str]): name of the layers whose outputs should be returned in forward. Can be anything in:
951
+ "stem", "linear", or "res2" ... If None, will return the output of the last layer.
952
+ """
953
+ super(ResNet, self).__init__()
954
+ self.stem = stem
955
+ self.num_classes = num_classes
956
+
957
+ current_stride = self.stem.stride
958
+ self._out_feature_strides = {"stem": current_stride}
959
+ self._out_feature_channels = {"stem": self.stem.out_channels}
960
+
961
+ self.stages_and_names = []
962
+ for i, blocks in enumerate(stages):
963
+ for block in blocks:
964
+ assert isinstance(block, ResNetBlockBase), block
965
+ curr_channels = block.out_channels
966
+ stage = nn.Sequential(*blocks)
967
+ name = "res" + str(i + 2)
968
+ self.add_module(name, stage)
969
+ self.stages_and_names.append((stage, name))
970
+ self._out_feature_strides[name] = current_stride = int(
971
+ current_stride * np.prod([k.stride for k in blocks])
972
+ )
973
+ self._out_feature_channels[name] = blocks[-1].out_channels
974
+
975
+ if num_classes is not None:
976
+ self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
977
+ self.linear = nn.Linear(curr_channels, num_classes)
978
+
979
+ # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
980
+ # "The 1000-way fully-connected layer is initialized by
981
+ # drawing weights from a zero-mean Gaussian with std of 0.01."
982
+ nn.init.normal_(self.linear.weight, std=0.01)
983
+ name = "linear"
984
+
985
+ if out_features is None:
986
+ out_features = [name]
987
+ self._out_features = out_features
988
+ assert len(self._out_features)
989
+ children = [x[0] for x in self.named_children()]
990
+ for out_feature in self._out_features:
991
+ assert out_feature in children, "Available children: {}".format(", ".join(children))
992
+
993
+ def forward(self, x):
994
+ outputs = {}
995
+ x = self.stem(x)
996
+ if "stem" in self._out_features:
997
+ outputs["stem"] = x
998
+ for stage, name in self.stages_and_names:
999
+ x = stage(x)
1000
+ if name in self._out_features:
1001
+ outputs[name] = x
1002
+ if self.num_classes is not None:
1003
+ x = self.avgpool(x)
1004
+ x = self.linear(x)
1005
+ if "linear" in self._out_features:
1006
+ outputs["linear"] = x
1007
+ return outputs
1008
+
1009
+ def output_shape(self):
1010
+ return {
1011
+ name: ShapeSpec(
1012
+ channels=self._out_feature_channels[name],
1013
+ stride=self._out_feature_strides[name],
1014
+ )
1015
+ for name in self._out_features
1016
+ }
1017
+
1018
+ @staticmethod
1019
+ def make_stage(
1020
+ block_class,
1021
+ num_blocks,
1022
+ first_stride=None,
1023
+ *,
1024
+ in_channels,
1025
+ out_channels,
1026
+ **kwargs,
1027
+ ):
1028
+ """
1029
+ Usually, layers that produce the same feature map spatial size
1030
+ are defined as one "stage".
1031
+ Under such definition, stride_per_block[1:] should all be 1.
1032
+ """
1033
+ if first_stride is not None:
1034
+ assert "stride" not in kwargs and "stride_per_block" not in kwargs
1035
+ kwargs["stride_per_block"] = [first_stride] + [1] * (num_blocks - 1)
1036
+ blocks = []
1037
+ for i in range(num_blocks):
1038
+ curr_kwargs = {}
1039
+ for k, v in kwargs.items():
1040
+ if k.endswith("_per_block"):
1041
+ assert len(v) == num_blocks, (
1042
+ f"Argument '{k}' of make_stage should have the " f"same length as num_blocks={num_blocks}."
1043
+ )
1044
+ newk = k[: -len("_per_block")]
1045
+ assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!"
1046
+ curr_kwargs[newk] = v[i]
1047
+ else:
1048
+ curr_kwargs[k] = v
1049
+
1050
+ blocks.append(block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs))
1051
+ in_channels = out_channels
1052
+
1053
+ return blocks
1054
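+
+ # Usage sketch (all sizes are assumptions): build one res2-style stage of three
+ # bottleneck blocks on top of the stem and read out the "res2" feature map.
+ def _resnet_stage_example():
+     blocks = ResNet.make_stage(
+         BottleneckBlock, 3, first_stride=1,
+         in_channels=64, bottleneck_channels=64, out_channels=256, norm="BN",
+     )
+     stem = BasicStem(in_channels=3, out_channels=64, norm="BN")
+     model = ResNet(stem, [blocks], out_features=["res2"])
+     return model(torch.randn(1, 3, 64, 64))["res2"].shape  # torch.Size([1, 256, 16, 16])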
+
1055
+
1056
+ class ROIPooler(nn.Module):
1057
+ """
1058
+ Region of interest feature map pooler that supports pooling from one or more
1059
+ feature maps.
1060
+ """
1061
+
1062
+ def __init__(
1063
+ self,
1064
+ output_size,
1065
+ scales,
1066
+ sampling_ratio,
1067
+ canonical_box_size=224,
1068
+ canonical_level=4,
1069
+ ):
1070
+ super().__init__()
1071
+ # assumption that stride is a power of 2.
1072
+ min_level = -math.log2(scales[0])
1073
+ max_level = -math.log2(scales[-1])
1074
+
1075
+ # a bunch of testing
1076
+ assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level))
1077
+ assert len(scales) == max_level - min_level + 1, "not pyramid"
1078
+ assert 0 < min_level and min_level <= max_level
1079
+ if isinstance(output_size, int):
1080
+ output_size = (output_size, output_size)
1081
+ assert len(output_size) == 2 and isinstance(output_size[0], int) and isinstance(output_size[1], int)
1082
+ if len(scales) > 1:
1083
+ assert min_level <= canonical_level and canonical_level <= max_level
1084
+ assert canonical_box_size > 0
1085
+
1086
+ self.output_size = output_size
1087
+ self.min_level = int(min_level)
1088
+ self.max_level = int(max_level)
1089
+ self.level_poolers = nn.ModuleList(RoIPool(output_size, spatial_scale=scale) for scale in scales)
1090
+ self.canonical_level = canonical_level
1091
+ self.canonical_box_size = canonical_box_size
1092
+
1093
+ def forward(self, feature_maps, boxes):
1094
+ """
1095
+ Args:
1096
+ feature_maps: Dict[str, torch.Tensor], each of shape (N, C, H, W)
1097
+ boxes: list[torch.Tensor], one (B, 4) tensor of boxes per image
1098
+ Returns:
1099
+ A tensor of shape(N*B, Channels, output_size, output_size)
1100
+ """
1101
+ x = [v for v in feature_maps.values()]
1102
+ num_level_assignments = len(self.level_poolers)
1103
+ assert len(x) == num_level_assignments and len(boxes) == x[0].size(0)
1104
+
1105
+ pooler_fmt_boxes = convert_boxes_to_pooler_format(boxes)
1106
+
1107
+ if num_level_assignments == 1:
1108
+ return self.level_poolers[0](x[0], pooler_fmt_boxes)
1109
+
1110
+ level_assignments = assign_boxes_to_levels(
1111
+ boxes,
1112
+ self.min_level,
1113
+ self.max_level,
1114
+ self.canonical_box_size,
1115
+ self.canonical_level,
1116
+ )
1117
+
1118
+ num_boxes = len(pooler_fmt_boxes)
1119
+ num_channels = x[0].shape[1]
1120
+ output_size = self.output_size[0]
1121
+
1122
+ dtype, device = x[0].dtype, x[0].device
1123
+ output = torch.zeros(
1124
+ (num_boxes, num_channels, output_size, output_size),
1125
+ dtype=dtype,
1126
+ device=device,
1127
+ )
1128
+
1129
+ for level, (x_level, pooler) in enumerate(zip(x, self.level_poolers)):
1130
+ inds = torch.nonzero(level_assignments == level).squeeze(1)
1131
+ pooler_fmt_boxes_level = pooler_fmt_boxes[inds]
1132
+ output[inds] = pooler(x_level, pooler_fmt_boxes_level)
1133
+
1134
+ return output
1135
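+
+ # Usage sketch (the map resolution and the box are assumptions): with a single
+ # 1/16-scale feature map, the pooler reduces each box to a 7x7 region feature.
+ def _roi_pooler_example():
+     pooler = ROIPooler(output_size=7, scales=(1.0 / 16,), sampling_ratio=0)
+     features = {"res4": torch.randn(1, 256, 32, 32)}
+     boxes = [torch.tensor([[0.0, 0.0, 64.0, 64.0]])]  # one box for the one image
+     return pooler(features, boxes).shape  # torch.Size([1, 256, 7, 7])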
+
1136
+
1137
+ class ROIOutputs(object):
1138
+ def __init__(self, cfg, training=False):
1139
+ self.smooth_l1_beta = cfg.ROI_BOX_HEAD.SMOOTH_L1_BETA
1140
+ self.box2box_transform = Box2BoxTransform(weights=cfg.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
1141
+ self.training = training
1142
+ self.score_thresh = cfg.ROI_HEADS.SCORE_THRESH_TEST
1143
+ self.min_detections = cfg.MIN_DETECTIONS
1144
+ self.max_detections = cfg.MAX_DETECTIONS
1145
+
1146
+ nms_thresh = cfg.ROI_HEADS.NMS_THRESH_TEST
1147
+ if not isinstance(nms_thresh, list):
1148
+ nms_thresh = [nms_thresh]
1149
+ self.nms_thresh = nms_thresh
1150
+
1151
+ def _predict_boxes(self, proposals, box_deltas, preds_per_image):
1152
+ num_pred = box_deltas.size(0)
1153
+ B = proposals[0].size(-1)
1154
+ K = box_deltas.size(-1) // B
1155
+ box_deltas = box_deltas.view(num_pred * K, B)
1156
+ proposals = torch.cat(proposals, dim=0).unsqueeze(-2).expand(num_pred, K, B)
1157
+ proposals = proposals.reshape(-1, B)
1158
+ boxes = self.box2box_transform.apply_deltas(box_deltas, proposals)
1159
+ return boxes.view(num_pred, K * B).split(preds_per_image, dim=0)
1160
+
1161
+ def _predict_objs(self, obj_logits, preds_per_image):
1162
+ probs = nn.functional.softmax(obj_logits, dim=-1)
1163
+ probs = probs.split(preds_per_image, dim=0)
1164
+ return probs
1165
+
1166
+ def _predict_attrs(self, attr_logits, preds_per_image):
1167
+ attr_logits = attr_logits[..., :-1].softmax(-1)
1168
+ attr_probs, attrs = attr_logits.max(-1)
1169
+ return attr_probs.split(preds_per_image, dim=0), attrs.split(preds_per_image, dim=0)
1170
+
1171
+ @torch.no_grad()
1172
+ def inference(
1173
+ self,
1174
+ obj_logits,
1175
+ attr_logits,
1176
+ box_deltas,
1177
+ pred_boxes,
1178
+ features,
1179
+ sizes,
1180
+ scales=None,
1181
+ ):
1182
+ # the number of predictions per image is taken from the proposal boxes
1183
+ preds_per_image = [p.size(0) for p in pred_boxes]
1184
+ boxes_all = self._predict_boxes(pred_boxes, box_deltas, preds_per_image)
1185
+ obj_scores_all = self._predict_objs(obj_logits, preds_per_image) # list of length N
1186
+ attr_probs_all, attrs_all = self._predict_attrs(attr_logits, preds_per_image)
1187
+ features = features.split(preds_per_image, dim=0)
1188
+
1189
+ # fun for each image too, also I can experiment and do multiple images
1190
+ final_results = []
1191
+ zipped = zip(boxes_all, obj_scores_all, attr_probs_all, attrs_all, sizes)
1192
+ for i, (boxes, obj_scores, attr_probs, attrs, size) in enumerate(zipped):
1193
+ for nms_t in self.nms_thresh:
1194
+ outputs = do_nms(
1195
+ boxes,
1196
+ obj_scores,
1197
+ size,
1198
+ self.score_thresh,
1199
+ nms_t,
1200
+ self.min_detections,
1201
+ self.max_detections,
1202
+ )
1203
+ if outputs is not None:
1204
+ max_boxes, max_scores, classes, ids = outputs
1205
+ break
1206
+
1207
+ if scales is not None:
1208
+ scale_yx = scales[i]
1209
+ max_boxes[:, 0::2] *= scale_yx[1]
1210
+ max_boxes[:, 1::2] *= scale_yx[0]
1211
+
1212
+ final_results.append(
1213
+ (
1214
+ max_boxes,
1215
+ classes,
1216
+ max_scores,
1217
+ attrs[ids],
1218
+ attr_probs[ids],
1219
+ features[i][ids],
1220
+ )
1221
+ )
1222
+ boxes, classes, class_probs, attrs, attr_probs, roi_features = map(list, zip(*final_results))
1223
+ return boxes, classes, class_probs, attrs, attr_probs, roi_features
1224
+
1225
+ def training(self, obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes):
1226
+ pass
1227
+
1228
+ def __call__(
1229
+ self,
1230
+ obj_logits,
1231
+ attr_logits,
1232
+ box_deltas,
1233
+ pred_boxes,
1234
+ features,
1235
+ sizes,
1236
+ scales=None,
1237
+ ):
1238
+ if self.training:
1239
+ raise NotImplementedError()
1240
+ return self.inference(
1241
+ obj_logits,
1242
+ attr_logits,
1243
+ box_deltas,
1244
+ pred_boxes,
1245
+ features,
1246
+ sizes,
1247
+ scales=scales,
1248
+ )
1249
+
1250
+
1251
+ class Res5ROIHeads(nn.Module):
1252
+ """
1253
+ ROIHeads perform all per-region computation in an R-CNN.
1254
+ It contains logic of cropping the regions, extract per-region features
1255
+ (by the res-5 block in this case), and make per-region predictions.
1256
+ """
1257
+
1258
+ def __init__(self, cfg, input_shape):
1259
+ super().__init__()
1260
+ self.batch_size_per_image = cfg.RPN.BATCH_SIZE_PER_IMAGE
1261
+ self.positive_sample_fraction = cfg.ROI_HEADS.POSITIVE_FRACTION
1262
+ self.in_features = cfg.ROI_HEADS.IN_FEATURES
1263
+ self.num_classes = cfg.ROI_HEADS.NUM_CLASSES
1264
+ self.proposal_append_gt = cfg.ROI_HEADS.PROPOSAL_APPEND_GT
1265
+ self.feature_strides = {k: v.stride for k, v in input_shape.items()}
1266
+ self.feature_channels = {k: v.channels for k, v in input_shape.items()}
1267
+ self.cls_agnostic_bbox_reg = cfg.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG
1268
+ self.stage_channel_factor = 2 ** 3 # res5 is 8x res2
1269
+ self.out_channels = cfg.RESNETS.RES2_OUT_CHANNELS * self.stage_channel_factor
1270
+
1271
+ # self.proposal_matcher = Matcher(
1272
+ # cfg.ROI_HEADS.IOU_THRESHOLDS,
1273
+ # cfg.ROI_HEADS.IOU_LABELS,
1274
+ # allow_low_quality_matches=False,
1275
+ # )
1276
+
1277
+ pooler_resolution = cfg.ROI_BOX_HEAD.POOLER_RESOLUTION
1278
+ pooler_scales = (1.0 / self.feature_strides[self.in_features[0]],)
1279
+ sampling_ratio = cfg.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
1280
+ res5_halve = cfg.ROI_BOX_HEAD.RES5HALVE
1281
+ use_attr = cfg.ROI_BOX_HEAD.ATTR
1282
+ num_attrs = cfg.ROI_BOX_HEAD.NUM_ATTRS
1283
+
1284
+ self.pooler = ROIPooler(
1285
+ output_size=pooler_resolution,
1286
+ scales=pooler_scales,
1287
+ sampling_ratio=sampling_ratio,
1288
+ )
1289
+
1290
+ self.res5 = self._build_res5_block(cfg)
1291
+ if not res5_halve:
1292
+ """
1293
+ Modifications for VG in RoI heads:
1294
+ 1. Change the stride of conv1 and shortcut in Res5.Block1 from 2 to 1
1295
+ 2. Modifying all conv2 with (padding: 1 --> 2) and (dilation: 1 --> 2)
1296
+ """
1297
+ self.res5[0].conv1.stride = (1, 1)
1298
+ self.res5[0].shortcut.stride = (1, 1)
1299
+ for i in range(3):
1300
+ self.res5[i].conv2.padding = (2, 2)
1301
+ self.res5[i].conv2.dilation = (2, 2)
1302
+
1303
+ self.box_predictor = FastRCNNOutputLayers(
1304
+ self.out_channels,
1305
+ self.num_classes,
1306
+ self.cls_agnostic_bbox_reg,
1307
+ use_attr=use_attr,
1308
+ num_attrs=num_attrs,
1309
+ )
1310
+
1311
+ def _build_res5_block(self, cfg):
1312
+ stage_channel_factor = self.stage_channel_factor # res5 is 8x res2
1313
+ num_groups = cfg.RESNETS.NUM_GROUPS
1314
+ width_per_group = cfg.RESNETS.WIDTH_PER_GROUP
1315
+ bottleneck_channels = num_groups * width_per_group * stage_channel_factor
1316
+ out_channels = self.out_channels
1317
+ stride_in_1x1 = cfg.RESNETS.STRIDE_IN_1X1
1318
+ norm = cfg.RESNETS.NORM
1319
+
1320
+ blocks = ResNet.make_stage(
1321
+ BottleneckBlock,
1322
+ 3,
1323
+ first_stride=2,
1324
+ in_channels=out_channels // 2,
1325
+ bottleneck_channels=bottleneck_channels,
1326
+ out_channels=out_channels,
1327
+ num_groups=num_groups,
1328
+ norm=norm,
1329
+ stride_in_1x1=stride_in_1x1,
1330
+ )
1331
+ return nn.Sequential(*blocks)
1332
+
1333
+ def _shared_roi_transform(self, features, boxes):
1334
+ x = self.pooler(features, boxes)
1335
+ return self.res5(x)
1336
+
1337
+ def forward(self, features, proposal_boxes, gt_boxes=None):
1338
+ if self.training:
1339
+ """
1340
+ see https://github.com/airsplay/py-bottom-up-attention/\
1341
+ blob/master/detectron2/modeling/roi_heads/roi_heads.py
1342
+ """
1343
+ raise NotImplementedError()
1344
+
1345
+ assert not proposal_boxes[0].requires_grad
1346
+ box_features = self._shared_roi_transform(features, proposal_boxes)
1347
+ feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1
1348
+ obj_logits, attr_logits, pred_proposal_deltas = self.box_predictor(feature_pooled)
1349
+ return obj_logits, attr_logits, pred_proposal_deltas, feature_pooled
1350
+
1351
+
1352
+ class AnchorGenerator(nn.Module):
1353
+ """
1354
+ For a set of image sizes and feature maps, computes a set of anchors.
1355
+ """
1356
+
1357
+ def __init__(self, cfg, input_shape: List[ShapeSpec]):
1358
+ super().__init__()
1359
+ sizes = cfg.ANCHOR_GENERATOR.SIZES
1360
+ aspect_ratios = cfg.ANCHOR_GENERATOR.ASPECT_RATIOS
1361
+ self.strides = [x.stride for x in input_shape]
1362
+ self.offset = cfg.ANCHOR_GENERATOR.OFFSET
1363
+ assert 0.0 <= self.offset < 1.0, self.offset
1364
+
1365
+ """
1366
+ sizes (list[list[int]]): sizes[i] is the list of anchor sizes for feat map i
1367
+ 1. given in absolute lengths in units of the input image;
1368
+ 2. they do not dynamically scale if the input image size changes.
1369
+ aspect_ratios (list[list[float]])
1370
+ strides (list[int]): stride of each input feature.
1371
+ """
1372
+
1373
+ self.num_features = len(self.strides)
1374
+ self.cell_anchors = nn.ParameterList(self._calculate_anchors(sizes, aspect_ratios))
1375
+ self._spacial_feat_dim = 4
1376
+
1377
+ def _calculate_anchors(self, sizes, aspect_ratios):
1378
+ # If one size (or aspect ratio) is specified and there are multiple feature
1379
+ # maps, then we "broadcast" anchors of that single size (or aspect ratio)
1380
+ if len(sizes) == 1:
1381
+ sizes *= self.num_features
1382
+ if len(aspect_ratios) == 1:
1383
+ aspect_ratios *= self.num_features
1384
+ assert self.num_features == len(sizes)
1385
+ assert self.num_features == len(aspect_ratios)
1386
+
1387
+ cell_anchors = [self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios)]
1388
+
1389
+ return cell_anchors
1390
+
1391
+ @property
1392
+ def box_dim(self):
1393
+ return self._spacial_feat_dim
1394
+
1395
+ @property
1396
+ def num_cell_anchors(self):
1397
+ """
1398
+ Returns:
1399
+ list[int]: Each int is the number of anchors at every pixel location, on that feature map.
1400
+ """
1401
+ return [len(cell_anchors) for cell_anchors in self.cell_anchors]
1402
+
1403
+ def grid_anchors(self, grid_sizes):
1404
+ anchors = []
1405
+ for (size, stride, base_anchors) in zip(grid_sizes, self.strides, self.cell_anchors):
1406
+ shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device)
1407
+ shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
1408
+
1409
+ anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4))
1410
+
1411
+ return anchors
1412
+
1413
+ def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)):
1414
+ """
1415
+ anchors are continuous geometric rectangles
1416
+ centered on one feature map point sample.
1417
+ We can later build the set of anchors
1418
+ for the entire feature map by tiling these tensors
1419
+ """
1420
+
1421
+ anchors = []
1422
+ for size in sizes:
1423
+ area = size ** 2.0
1424
+ for aspect_ratio in aspect_ratios:
1425
+ w = math.sqrt(area / aspect_ratio)
1426
+ h = aspect_ratio * w
1427
+ x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0
1428
+ anchors.append([x0, y0, x1, y1])
1429
+ return nn.Parameter(torch.tensor(anchors))
1430
+
1431
+ def forward(self, features):
1432
+ """
1433
+ Args:
1434
+ features List[torch.Tensor]: list of feature maps on which to generate anchors.
1435
+ Returns:
1436
+ torch.Tensor: the per-level anchors, tiled once per image.
1437
+ """
1438
+ num_images = features[0].size(0)
1439
+ grid_sizes = [feature_map.shape[-2:] for feature_map in features]
1440
+ anchors_over_all_feature_maps = self.grid_anchors(grid_sizes)
1441
+ anchors_over_all_feature_maps = torch.stack(anchors_over_all_feature_maps)
1442
+ return anchors_over_all_feature_maps.unsqueeze(0).repeat_interleave(num_images, dim=0)
1443
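+
+ # Usage sketch: generate_cell_anchors does not read instance state, so its
+ # geometry can be illustrated without a full config-driven AnchorGenerator.
+ # The size and aspect-ratio values below are toy assumptions.
+ def _cell_anchor_example():
+     anchors = AnchorGenerator.generate_cell_anchors(None, sizes=(32,), aspect_ratios=(0.5, 1.0, 2.0))
+     return anchors.shape  # torch.Size([3, 4]): one (x0, y0, x1, y1) anchor per aspect ratio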
+
1444
+
1445
+ class RPNHead(nn.Module):
1446
+ """
1447
+ RPN classification and regression heads. Uses a 3x3 conv to produce a shared
1448
+ hidden state from which one 1x1 conv predicts objectness logits for each anchor
1449
+ and a second 1x1 conv predicts bounding-box deltas specifying how to deform
1450
+ each anchor into an object proposal.
1451
+ """
1452
+
1453
+ def __init__(self, cfg, input_shape: List[ShapeSpec]):
1454
+ super().__init__()
1455
+
1456
+ # Standard RPN is shared across levels:
1457
+ in_channels = [s.channels for s in input_shape]
1458
+ assert len(set(in_channels)) == 1, "Each level must have the same channel!"
1459
+ in_channels = in_channels[0]
1460
+
1461
+ anchor_generator = AnchorGenerator(cfg, input_shape)
1462
+ num_cell_anchors = anchor_generator.num_cell_anchors
1463
+ box_dim = anchor_generator.box_dim
1464
+ assert len(set(num_cell_anchors)) == 1, "Each level must have the same number of cell anchors"
1465
+ num_cell_anchors = num_cell_anchors[0]
1466
+
1467
+ if cfg.PROPOSAL_GENERATOR.HIDDEN_CHANNELS == -1:
1468
+ hid_channels = in_channels
1469
+ else:
1470
+ hid_channels = cfg.PROPOSAL_GENERATOR.HIDDEN_CHANNELS
1471
+ # Modifications for VG in RPN (modeling/proposal_generator/rpn.py)
1472
+ # Use the hidden dim instead of the same dim as Res4 (in_channels)
1473
+
1474
+ # 3x3 conv for the hidden representation
1475
+ self.conv = nn.Conv2d(in_channels, hid_channels, kernel_size=3, stride=1, padding=1)
1476
+ # 1x1 conv for predicting objectness logits
1477
+ self.objectness_logits = nn.Conv2d(hid_channels, num_cell_anchors, kernel_size=1, stride=1)
1478
+ # 1x1 conv for predicting box2box transform deltas
1479
+ self.anchor_deltas = nn.Conv2d(hid_channels, num_cell_anchors * box_dim, kernel_size=1, stride=1)
1480
+
1481
+ for layer in [self.conv, self.objectness_logits, self.anchor_deltas]:
1482
+ nn.init.normal_(layer.weight, std=0.01)
1483
+ nn.init.constant_(layer.bias, 0)
1484
+
1485
+ def forward(self, features):
1486
+ """
1487
+ Args:
1488
+ features (list[Tensor]): list of feature maps
1489
+ """
1490
+ pred_objectness_logits = []
1491
+ pred_anchor_deltas = []
1492
+ for x in features:
1493
+ t = nn.functional.relu(self.conv(x))
1494
+ pred_objectness_logits.append(self.objectness_logits(t))
1495
+ pred_anchor_deltas.append(self.anchor_deltas(t))
1496
+ return pred_objectness_logits, pred_anchor_deltas
1497
+
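+ # --- Illustrative sketch (not part of the original checkpoint code) ------------
+ # Rough shape check for RPNHead outputs, assuming `head` and `feature` are built
+ # elsewhere from a config with A cell anchors per location and box_dim == 4:
+ # a feature map of shape (N, C, H, W) yields objectness logits of shape
+ # (N, A, H, W) and anchor deltas of shape (N, A * 4, H, W).
+ def _example_rpn_head_shapes(head, feature):
+     logits, deltas = head([feature])
+     n, _, h, w = feature.shape
+     a = logits[0].shape[1]  # number of cell anchors per location
+     assert logits[0].shape == (n, a, h, w)
+     assert deltas[0].shape == (n, a * 4, h, w)
+     return logits, deltas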
1498
+
1499
+ class RPN(nn.Module):
1500
+ """
1501
+ Region Proposal Network, introduced by the Faster R-CNN paper.
1502
+ """
1503
+
1504
+ def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
1505
+ super().__init__()
1506
+
1507
+ self.min_box_side_len = cfg.PROPOSAL_GENERATOR.MIN_SIZE
1508
+ self.in_features = cfg.RPN.IN_FEATURES
1509
+ self.nms_thresh = cfg.RPN.NMS_THRESH
1510
+ self.batch_size_per_image = cfg.RPN.BATCH_SIZE_PER_IMAGE
1511
+ self.positive_fraction = cfg.RPN.POSITIVE_FRACTION
1512
+ self.smooth_l1_beta = cfg.RPN.SMOOTH_L1_BETA
1513
+ self.loss_weight = cfg.RPN.LOSS_WEIGHT
1514
+
1515
+ self.pre_nms_topk = {
1516
+ True: cfg.RPN.PRE_NMS_TOPK_TRAIN,
1517
+ False: cfg.RPN.PRE_NMS_TOPK_TEST,
1518
+ }
1519
+ self.post_nms_topk = {
1520
+ True: cfg.RPN.POST_NMS_TOPK_TRAIN,
1521
+ False: cfg.RPN.POST_NMS_TOPK_TEST,
1522
+ }
1523
+ self.boundary_threshold = cfg.RPN.BOUNDARY_THRESH
1524
+
1525
+ self.anchor_generator = AnchorGenerator(cfg, [input_shape[f] for f in self.in_features])
1526
+ self.box2box_transform = Box2BoxTransform(weights=cfg.RPN.BBOX_REG_WEIGHTS)
1527
+ self.anchor_matcher = Matcher(
1528
+ cfg.RPN.IOU_THRESHOLDS,
1529
+ cfg.RPN.IOU_LABELS,
1530
+ allow_low_quality_matches=True,
1531
+ )
1532
+ self.rpn_head = RPNHead(cfg, [input_shape[f] for f in self.in_features])
1533
+
1534
+ def training(self, images, image_shapes, features, gt_boxes):
1535
+ pass
1536
+
1537
+ def inference(self, outputs, images, image_shapes, features, gt_boxes=None):
1538
+ outputs = find_top_rpn_proposals(
1539
+ outputs.predict_proposals(),
1540
+ outputs.predict_objectness_logits(),
1541
+ images,
1542
+ image_shapes,
1543
+ self.nms_thresh,
1544
+ self.pre_nms_topk[self.training],
1545
+ self.post_nms_topk[self.training],
1546
+ self.min_box_side_len,
1547
+ self.training,
1548
+ )
1549
+
1550
+ results = []
1551
+ for img in outputs:
1552
+ im_boxes, img_box_logits = img
1553
+ img_box_logits, inds = img_box_logits.sort(descending=True)
1554
+ im_boxes = im_boxes[inds]
1555
+ results.append((im_boxes, img_box_logits))
1556
+
1557
+ (proposal_boxes, logits) = tuple(map(list, zip(*results)))
1558
+ return proposal_boxes, logits
1559
+
1560
+ def forward(self, images, image_shapes, features, gt_boxes=None):
1561
+ """
1562
+ Args:
1563
+ images (torch.Tensor): batched input images, `N` in total
1564
+ features (dict[str, torch.Tensor]): mapping from feature-map name to feature tensor
1565
+ gt_boxes (optional): ground-truth boxes, only relevant for training
1566
+ """
1567
+ # features is dict, key = block level, v = feature_map
1568
+ features = [features[f] for f in self.in_features]
1569
+ pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
1570
+ anchors = self.anchor_generator(features)
1571
+ outputs = RPNOutputs(
1572
+ self.box2box_transform,
1573
+ self.anchor_matcher,
1574
+ self.batch_size_per_image,
1575
+ self.positive_fraction,
1576
+ images,
1577
+ pred_objectness_logits,
1578
+ pred_anchor_deltas,
1579
+ anchors,
1580
+ self.boundary_threshold,
1581
+ gt_boxes,
1582
+ self.smooth_l1_beta,
1583
+ )
1584
+ # For RPN-only models, the proposals are the final output
1585
+
1586
+ if self.training:
1587
+ # Training is not supported in this port; note that nn.Module's boolean
+ # `training` attribute shadows the `training` method defined above, so the
+ # call below could never succeed even if it were reachable.
+ raise NotImplementedError()
1588
+ # return self.training(outputs, images, image_shapes, features, gt_boxes)
1589
+ else:
1590
+ return self.inference(outputs, images, image_shapes, features, gt_boxes)
1591
+
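+ # --- Illustrative sketch (not part of the original checkpoint code) ------------
+ # The pre/post-NMS budgets above are plain dicts keyed by the boolean
+ # `self.training` flag, so once the model is in eval mode the *_TEST values are
+ # used automatically. A minimal illustration of that lookup pattern (the numbers
+ # are placeholders, not the real config defaults):
+ def _example_topk_lookup(training: bool):
+     pre_nms_topk = {True: 12000, False: 6000}
+     post_nms_topk = {True: 2000, False: 1000}
+     return pre_nms_topk[training], post_nms_topk[training]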
1592
+
1593
+ class FastRCNNOutputLayers(nn.Module):
1594
+ """
1595
+ Two linear layers for predicting Fast R-CNN outputs:
1596
+ (1) proposal-to-detection box regression deltas
1597
+ (2) classification scores
1598
+ """
1599
+
1600
+ def __init__(
1601
+ self,
1602
+ input_size,
1603
+ num_classes,
1604
+ cls_agnostic_bbox_reg,
1605
+ box_dim=4,
1606
+ use_attr=False,
1607
+ num_attrs=-1,
1608
+ ):
1609
+ """
1610
+ Args:
1611
+ input_size (int): channels, or (channels, height, width)
1612
+ num_classes (int)
1613
+ cls_agnostic_bbox_reg (bool)
1614
+ box_dim (int)
+ use_attr (bool): whether to also predict attribute classes (VG-style head)
+ num_attrs (int): number of attribute classes (only used when use_attr is True)
1615
+ """
1616
+ super().__init__()
1617
+
1618
+ if not isinstance(input_size, int):
1619
+ input_size = np.prod(input_size)
1620
+
1621
+ # (+1 for the background class)
1622
+ self.cls_score = nn.Linear(input_size, num_classes + 1)
1623
+ num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
1624
+ self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)
1625
+
1626
+ self.use_attr = use_attr
1627
+ if use_attr:
1628
+ """
1629
+ Modifications for VG in RoI heads
1630
+ Embedding: {num_classes + 1} --> {input_size // 8}
1631
+ Linear: {input_size + input_size // 8} --> {input_size // 4}
1632
+ Linear: {input_size // 4} --> {num_attrs + 1}
1633
+ """
1634
+ self.cls_embedding = nn.Embedding(num_classes + 1, input_size // 8)
1635
+ self.fc_attr = nn.Linear(input_size + input_size // 8, input_size // 4)
1636
+ self.attr_score = nn.Linear(input_size // 4, num_attrs + 1)
1637
+
1638
+ nn.init.normal_(self.cls_score.weight, std=0.01)
1639
+ nn.init.normal_(self.bbox_pred.weight, std=0.001)
1640
+ for item in [self.cls_score, self.bbox_pred]:
1641
+ nn.init.constant_(item.bias, 0)
1642
+
1643
+ def forward(self, roi_features):
1644
+ if roi_features.dim() > 2:
1645
+ roi_features = torch.flatten(roi_features, start_dim=1)
1646
+ scores = self.cls_score(roi_features)
1647
+ proposal_deltas = self.bbox_pred(roi_features)
1648
+ if self.use_attr:
1649
+ _, max_class = scores.max(-1) # [b, c] --> [b]
1650
+ cls_emb = self.cls_embedding(max_class) # [b] --> [b, 256]
1651
+ roi_features = torch.cat([roi_features, cls_emb], -1) # [b, 2048] + [b, 256] --> [b, 2304]
1652
+ roi_features = self.fc_attr(roi_features)
1653
+ roi_features = nn.functional.relu(roi_features)
1654
+ attr_scores = self.attr_score(roi_features)
1655
+ return scores, attr_scores, proposal_deltas
1656
+ else:
1657
+ return scores, proposal_deltas
1658
+
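+ # --- Illustrative sketch (not part of the original checkpoint code) ------------
+ # Expected output shapes for FastRCNNOutputLayers on pooled ROI features of
+ # shape (B, input_size). The 2048-d features, 1600 object classes and 400
+ # attribute classes below mirror the usual Visual Genome setup but are
+ # assumptions for this sketch only.
+ def _example_output_layer_shapes():
+     layers = FastRCNNOutputLayers(
+         input_size=2048,
+         num_classes=1600,
+         cls_agnostic_bbox_reg=False,
+         use_attr=True,
+         num_attrs=400,
+     )
+     roi_feats = torch.randn(8, 2048)
+     scores, attr_scores, deltas = layers(roi_feats)
+     assert scores.shape == (8, 1601)       # num_classes + 1
+     assert attr_scores.shape == (8, 401)   # num_attrs + 1
+     assert deltas.shape == (8, 1600 * 4)   # per-class box deltas
+     return scores, attr_scores, deltas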
1659
+
1660
+ class GeneralizedRCNN(nn.Module):
1661
+ def __init__(self, cfg):
1662
+ super().__init__()
1663
+
1664
+ self.backbone = build_backbone(cfg)
1665
+ self.proposal_generator = RPN(cfg, self.backbone.output_shape())
1666
+ self.roi_heads = Res5ROIHeads(cfg, self.backbone.output_shape())
1667
+ self.roi_outputs = ROIOutputs(cfg)
1668
+
1669
+ @classmethod
1670
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
1671
+ config = kwargs.pop("config", None)
1672
+ state_dict = kwargs.pop("state_dict", None)
1673
+ cache_dir = kwargs.pop("cache_dir", None)
1674
+ from_tf = kwargs.pop("from_tf", False)
1675
+ force_download = kwargs.pop("force_download", False)
1676
+ resume_download = kwargs.pop("resume_download", False)
1677
+ proxies = kwargs.pop("proxies", None)
1678
+ local_files_only = kwargs.pop("local_files_only", False)
1679
+ use_cdn = kwargs.pop("use_cdn", True)
1680
+
1681
+ # Load config if we don't provide a configuration
1682
+ if not isinstance(config, Config):
1683
+ config_path = config if config is not None else pretrained_model_name_or_path
1684
1685
+ config = Config.from_pretrained(
1686
+ config_path,
1687
+ cache_dir=cache_dir,
1688
+ force_download=force_download,
1689
+ resume_download=resume_download,
1690
+ proxies=proxies,
1691
+ local_files_only=local_files_only,
1692
+ )
1693
+
1694
+ # Load model
1695
+ if pretrained_model_name_or_path is not None:
1696
+ if os.path.isdir(pretrained_model_name_or_path):
1697
+ if os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
1698
+ # Load from a PyTorch checkpoint
1699
+ archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
1700
+ else:
1701
+ raise EnvironmentError(
1702
+ "Error no file named {} found in directory {} ".format(
1703
+ WEIGHTS_NAME,
1704
+ pretrained_model_name_or_path,
1705
+ )
1706
+ )
1707
+ elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
1708
+ archive_file = pretrained_model_name_or_path
1709
+ elif os.path.isfile(pretrained_model_name_or_path + ".index"):
1710
+ assert (
1711
+ from_tf
1712
+ ), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
1713
+ pretrained_model_name_or_path + ".index"
1714
+ )
1715
+ archive_file = pretrained_model_name_or_path + ".index"
1716
+ else:
1717
+ archive_file = hf_bucket_url(
1718
+ pretrained_model_name_or_path,
1719
+ filename=WEIGHTS_NAME,
1720
+ use_cdn=use_cdn,
1721
+ )
1722
+
1723
+ try:
1724
+ # Load from URL or cache if already cached
1725
+ resolved_archive_file = cached_path(
1726
+ archive_file,
1727
+ cache_dir=cache_dir,
1728
+ force_download=force_download,
1729
+ proxies=proxies,
1730
+ resume_download=resume_download,
1731
+ local_files_only=local_files_only,
1732
+ )
1733
+ if resolved_archive_file is None:
1734
+ raise EnvironmentError
1735
+ except EnvironmentError:
1736
+ msg = f"Can't load weights for '{pretrained_model_name_or_path}'."
1737
+ raise EnvironmentError(msg)
1738
+
1739
+ if resolved_archive_file == archive_file:
1740
+ print("loading weights file {}".format(archive_file))
1741
+ else:
1742
+ print("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file))
1743
+ else:
1744
+ resolved_archive_file = None
1745
+
1746
+ # Instantiate model.
1747
+ model = cls(config)
1748
+
1749
+ if state_dict is None:
1750
+ try:
1751
+ try:
1752
+ state_dict = torch.load(resolved_archive_file, map_location="cpu")
1753
+ except Exception:
1754
+ state_dict = load_checkpoint(resolved_archive_file)
1755
+
1756
+ except Exception:
1757
+ raise OSError(
1758
+ "Unable to load weights from pytorch checkpoint file. "
1759
+ "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
1760
+ )
1761
+
1762
+ missing_keys = []
1763
+ unexpected_keys = []
1764
+ error_msgs = []
1765
+
1766
+ # Convert old format to new format if needed from a PyTorch state_dict
1767
+ old_keys = []
1768
+ new_keys = []
1769
+ for key in state_dict.keys():
1770
+ new_key = None
1771
+ if "gamma" in key:
1772
+ new_key = key.replace("gamma", "weight")
1773
+ if "beta" in key:
1774
+ new_key = key.replace("beta", "bias")
1775
+ if new_key:
1776
+ old_keys.append(key)
1777
+ new_keys.append(new_key)
1778
+ for old_key, new_key in zip(old_keys, new_keys):
1779
+ state_dict[new_key] = state_dict.pop(old_key)
1780
+
1781
+ # copy state_dict so _load_from_state_dict can modify it
1782
+ metadata = getattr(state_dict, "_metadata", None)
1783
+ state_dict = state_dict.copy()
1784
+ if metadata is not None:
1785
+ state_dict._metadata = metadata
1786
+
1787
+ model_to_load = model
1788
+ # load non-strictly and record missing/unexpected keys so the messages
+ # printed below reflect what actually happened
+ load_result = model_to_load.load_state_dict(state_dict, strict=False)
+ missing_keys.extend(load_result.missing_keys)
+ unexpected_keys.extend(load_result.unexpected_keys)
1789
+
1790
+ if model.__class__.__name__ != model_to_load.__class__.__name__:
1791
+ base_model_state_dict = model_to_load.state_dict().keys()
1792
+ head_model_state_dict_without_base_prefix = [
1793
+ key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys()
1794
+ ]
1795
+ missing_keys.extend(set(head_model_state_dict_without_base_prefix) - set(base_model_state_dict))
1796
+
1797
+ if len(unexpected_keys) > 0:
1798
+ print(
1799
+ f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
1800
+ f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
1801
+ f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
1802
+ f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
1803
+ f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
1804
+ f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
1805
+ )
1806
+ else:
1807
+ print(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
1808
+ if len(missing_keys) > 0:
1809
+ print(
1810
+ f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
1811
+ f"and are newly initialized: {missing_keys}\n"
1812
+ f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
1813
+ )
1814
+ else:
1815
+ print(
1816
+ f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
1817
+ f"If your task is similar to the task the model of the checkpoint was trained on, "
1818
+ f"you can already use {model.__class__.__name__} for predictions without further training."
1819
+ )
1820
+ if len(error_msgs) > 0:
1821
+ raise RuntimeError(
1822
+ "Error(s) in loading state_dict for {}:\n\t{}".format(
1823
+ model.__class__.__name__, "\n\t".join(error_msgs)
1824
+ )
1825
+ )
1826
+ # Set model in evaluation mode to deactivate DropOut modules by default
1827
+ model.eval()
1828
+ return model
1829
+
1830
+ def forward(
1831
+ self,
1832
+ images,
1833
+ image_shapes,
1834
+ gt_boxes=None,
1835
+ proposals=None,
1836
+ scales_yx=None,
1837
+ **kwargs,
1838
+ ):
1839
+ """
1840
+ kwargs:
1841
+ max_detections (int), return_tensors {"np", "pt", None}, padding {None,
1842
+ "max_detections"}, pad_value (int), location = {"cuda", "cpu"}
1843
+ """
1844
+ data = next(self.parameters()).data
1845
+ with torch.no_grad():
1846
+ if self.training:
1847
+ print ("warning. you are attempting to train the frcnn model which is not supportd. switching to eval mode")
1848
+ self.eval()
1849
+ for param in self.parameters():
1850
+ param.requires_grad_(False)
1851
+ #print (image_shapes.dtype)
1852
+ return self.inference(
1853
+ images=images.to(dtype=data.dtype, device=data.device),
1854
+ image_shapes=image_shapes.to(device=data.device),
1855
+ gt_boxes=gt_boxes.to(dtype=data.dtype, device=data.device) if gt_boxes is not None else None,
1856
+ proposals=proposals.to(dtype=data.dtype, device=data.device) if proposals is not None else None,
1857
+ scales_yx=scales_yx.to(dtype=data.dtype, device=data.device) if scales_yx is not None else None,
1858
+ **kwargs,
1859
+ )
1860
+
1861
+ @torch.no_grad()
1862
+ def inference(
1863
+ self,
1864
+ images,
1865
+ image_shapes,
1866
+ gt_boxes=None,
1867
+ proposals=None,
1868
+ scales_yx=None,
1869
+ **kwargs,
1870
+ ):
1871
+ # run images through backbone
1872
+ original_sizes = image_shapes * scales_yx
1873
+ features = self.backbone(images)
1874
+
1875
+ # generate proposals if none are available
1876
+ if proposals is None:
1877
+ proposal_boxes, _ = self.proposal_generator(images, image_shapes, features, gt_boxes)
1878
+ else:
1879
+ # use the externally supplied proposals as the boxes to pool from
+ proposal_boxes = proposals
1880
+
1881
+ # pool object features from either gt_boxes, or from proposals
1882
+ obj_logits, attr_logits, box_deltas, feature_pooled = self.roi_heads(features, proposal_boxes, gt_boxes)
1883
+
1884
+ # prepare FRCNN Outputs and select top proposals
1885
+ boxes, classes, class_probs, attrs, attr_probs, roi_features = self.roi_outputs(
1886
+ obj_logits=obj_logits,
1887
+ attr_logits=attr_logits,
1888
+ box_deltas=box_deltas,
1889
+ pred_boxes=proposal_boxes,
1890
+ features=feature_pooled,
1891
+ sizes=image_shapes,
1892
+ scales=scales_yx,
1893
+ )
1894
+
1895
+ # will we pad???
1896
+ subset_kwargs = {
1897
+ "max_detections": kwargs.get("max_detections", None),
1898
+ "return_tensors": kwargs.get("return_tensors", None),
1899
+ "pad_value": kwargs.get("pad_value", 0),
1900
+ "padding": kwargs.get("padding", None),
1901
+ }
1902
+ preds_per_image = torch.tensor([p.size(0) for p in boxes])
1903
+ boxes = pad_list_tensors(boxes, preds_per_image, **subset_kwargs)
1904
+ classes = pad_list_tensors(classes, preds_per_image, **subset_kwargs)
1905
+ class_probs = pad_list_tensors(class_probs, preds_per_image, **subset_kwargs)
1906
+ attrs = pad_list_tensors(attrs, preds_per_image, **subset_kwargs)
1907
+ attr_probs = pad_list_tensors(attr_probs, preds_per_image, **subset_kwargs)
1908
+ roi_features = pad_list_tensors(roi_features, preds_per_image, **subset_kwargs)
1909
+ subset_kwargs["padding"] = None
1910
+ preds_per_image = pad_list_tensors(preds_per_image, None, **subset_kwargs)
1911
+ sizes = pad_list_tensors(image_shapes, None, **subset_kwargs)
1912
+ #print (boxes.device, original_sizes.device)
1913
+ normalized_boxes = norm_box(boxes, original_sizes.to(boxes.device))
1914
+ return OrderedDict(
1915
+ {
1916
+ "obj_ids": classes,
1917
+ "obj_probs": class_probs,
1918
+ "attr_ids": attrs,
1919
+ "attr_probs": attr_probs,
1920
+ "boxes": boxes,
1921
+ "sizes": sizes,
1922
+ "preds_per_image": preds_per_image,
1923
+ "roi_features": roi_features,
1924
+ "normalized_boxes": normalized_boxes,
1925
+ }
1926
+ )
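+
+
+ # --- Illustrative usage sketch (not part of the original checkpoint code) ------
+ # End-to-end driver for the detector defined above. The checkpoint name and the
+ # max_detections value are assumptions; `images`, `sizes` and `scales_yx` are
+ # expected to come from an external preprocessing step (resize + normalization)
+ # that is not defined in this file.
+ def _example_extract_features(images, sizes, scales_yx,
+                               checkpoint="unc-nlp/frcnn-vg-finetuned"):
+     cfg = Config.from_pretrained(checkpoint)
+     frcnn = GeneralizedRCNN.from_pretrained(checkpoint, config=cfg)
+     output_dict = frcnn(
+         images,
+         sizes,
+         scales_yx=scales_yx,
+         padding="max_detections",
+         max_detections=36,          # assumed budget; pick what the downstream model expects
+         return_tensors="pt",
+     )
+     # padded ROI features plus boxes normalized by the original image sizes
+     return output_dict["roi_features"], output_dict["normalized_boxes"]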