datnguyentien204 committed
Commit ee28498 · verified · 1 Parent(s): e3c6fb8

7d43461cde87cd8d635396cf3236e4c45d1a3eabb2336bdda952a4105d6816f0

Retinaface_model_v2/Resnet50_Final.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d1de9c2944f2ccddca5f5e010ea5ae64a39845a86311af6fdf30841b0a5a16d
+ size 109497761
Retinaface_model_v2/mobilenet0.25_Final.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2979b33ffafda5d74b6948cd7a5b9a7a62f62b949cef24e95fd15d2883a65220
+ size 1789735
Retinaface_model_v2/mobilenetV1X0.25_pretrain.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75c37f8d438b7c6aed2e6b310fd0540c40dabf0dd820770bd7082668dfd7c1c1
+ size 3827150
models/__pycache__/retinaface.cpython-38.pyc ADDED
Binary file (5.51 kB)
 
models/net.py ADDED
@@ -0,0 +1,137 @@
+ import time
+ import torch
+ import torch.nn as nn
+ import torchvision.models._utils as _utils
+ import torchvision.models as models
+ import torch.nn.functional as F
+ from torch.autograd import Variable
+
+ def conv_bn(inp, oup, stride=1, leaky=0):
+     return nn.Sequential(
+         nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
+         nn.BatchNorm2d(oup),
+         nn.LeakyReLU(negative_slope=leaky, inplace=True)
+     )
+
+ def conv_bn_no_relu(inp, oup, stride):
+     return nn.Sequential(
+         nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
+         nn.BatchNorm2d(oup),
+     )
+
+ def conv_bn1X1(inp, oup, stride, leaky=0):
+     return nn.Sequential(
+         nn.Conv2d(inp, oup, 1, stride, padding=0, bias=False),
+         nn.BatchNorm2d(oup),
+         nn.LeakyReLU(negative_slope=leaky, inplace=True)
+     )
+
+ def conv_dw(inp, oup, stride, leaky=0.1):
+     return nn.Sequential(
+         nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
+         nn.BatchNorm2d(inp),
+         nn.LeakyReLU(negative_slope=leaky, inplace=True),
+
+         nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
+         nn.BatchNorm2d(oup),
+         nn.LeakyReLU(negative_slope=leaky, inplace=True),
+     )
+
+ class SSH(nn.Module):
+     def __init__(self, in_channel, out_channel):
+         super(SSH, self).__init__()
+         assert out_channel % 4 == 0
+         leaky = 0
+         if (out_channel <= 64):
+             leaky = 0.1
+         self.conv3X3 = conv_bn_no_relu(in_channel, out_channel//2, stride=1)
+
+         self.conv5X5_1 = conv_bn(in_channel, out_channel//4, stride=1, leaky=leaky)
+         self.conv5X5_2 = conv_bn_no_relu(out_channel//4, out_channel//4, stride=1)
+
+         self.conv7X7_2 = conv_bn(out_channel//4, out_channel//4, stride=1, leaky=leaky)
+         self.conv7x7_3 = conv_bn_no_relu(out_channel//4, out_channel//4, stride=1)
+
+     def forward(self, input):
+         conv3X3 = self.conv3X3(input)
+
+         conv5X5_1 = self.conv5X5_1(input)
+         conv5X5 = self.conv5X5_2(conv5X5_1)
+
+         conv7X7_2 = self.conv7X7_2(conv5X5_1)
+         conv7X7 = self.conv7x7_3(conv7X7_2)
+
+         out = torch.cat([conv3X3, conv5X5, conv7X7], dim=1)
+         out = F.relu(out)
+         return out
+
+ class FPN(nn.Module):
+     def __init__(self, in_channels_list, out_channels):
+         super(FPN, self).__init__()
+         leaky = 0
+         if (out_channels <= 64):
+             leaky = 0.1
+         self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride=1, leaky=leaky)
+         self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride=1, leaky=leaky)
+         self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride=1, leaky=leaky)
+
+         self.merge1 = conv_bn(out_channels, out_channels, leaky=leaky)
+         self.merge2 = conv_bn(out_channels, out_channels, leaky=leaky)
+
+     def forward(self, input):
+         # names = list(input.keys())
+         input = list(input.values())
+
+         output1 = self.output1(input[0])
+         output2 = self.output2(input[1])
+         output3 = self.output3(input[2])
+
+         up3 = F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode="nearest")
+         output2 = output2 + up3
+         output2 = self.merge2(output2)
+
+         up2 = F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode="nearest")
+         output1 = output1 + up2
+         output1 = self.merge1(output1)
+
+         out = [output1, output2, output3]
+         return out
+
+
+
+ class MobileNetV1(nn.Module):
+     def __init__(self):
+         super(MobileNetV1, self).__init__()
+         self.stage1 = nn.Sequential(
+             conv_bn(3, 8, 2, leaky=0.1),    # 3
+             conv_dw(8, 16, 1),              # 7
+             conv_dw(16, 32, 2),             # 11
+             conv_dw(32, 32, 1),             # 19
+             conv_dw(32, 64, 2),             # 27
+             conv_dw(64, 64, 1),             # 43
+         )
+         self.stage2 = nn.Sequential(
+             conv_dw(64, 128, 2),            # 43 + 16 = 59
+             conv_dw(128, 128, 1),           # 59 + 32 = 91
+             conv_dw(128, 128, 1),           # 91 + 32 = 123
+             conv_dw(128, 128, 1),           # 123 + 32 = 155
+             conv_dw(128, 128, 1),           # 155 + 32 = 187
+             conv_dw(128, 128, 1),           # 187 + 32 = 219
+         )
+         self.stage3 = nn.Sequential(
+             conv_dw(128, 256, 2),           # 219 + 32 = 251
+             conv_dw(256, 256, 1),           # 251 + 64 = 315
+         )
+         self.avg = nn.AdaptiveAvgPool2d((1, 1))
+         self.fc = nn.Linear(256, 1000)
+
+     def forward(self, x):
+         x = self.stage1(x)
+         x = self.stage2(x)
+         x = self.stage3(x)
+         x = self.avg(x)
+         # x = self.model(x)
+         x = x.view(-1, 256)
+         x = self.fc(x)
+         return x
+
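
The blocks above chain as backbone stages → FPN → SSH. A minimal shape check on a dummy input (a sketch, not part of the commit; the 224×224 size is arbitrary and the channel list [64, 128, 256] is read off the stage definitions above):

```python
import torch
from models.net import MobileNetV1, FPN, SSH

net = MobileNetV1()
x = torch.randn(1, 3, 224, 224)
f1 = net.stage1(x)    # (1, 64, 28, 28)  -- stride 8
f2 = net.stage2(f1)   # (1, 128, 14, 14) -- stride 16
f3 = net.stage3(f2)   # (1, 256, 7, 7)   -- stride 32

fpn = FPN([64, 128, 256], 64)                  # channel list matches the three stages
p1, p2, p3 = fpn({'1': f1, '2': f2, '3': f3})  # every level mapped to 64 channels
ssh = SSH(64, 64)
print(ssh(p1).shape)                           # torch.Size([1, 64, 28, 28])
```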
models/retinaface.py ADDED
@@ -0,0 +1,127 @@
+ import torch
+ import torch.nn as nn
+ import torchvision.models.detection.backbone_utils as backbone_utils
+ import torchvision.models._utils as _utils
+ import torch.nn.functional as F
+ from collections import OrderedDict
+
+ from models.net import MobileNetV1 as MobileNetV1
+ from models.net import FPN as FPN
+ from models.net import SSH as SSH
+
+
+
+ class ClassHead(nn.Module):
+     def __init__(self, inchannels=512, num_anchors=3):
+         super(ClassHead, self).__init__()
+         self.num_anchors = num_anchors
+         self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors*2, kernel_size=(1, 1), stride=1, padding=0)
+
+     def forward(self, x):
+         out = self.conv1x1(x)
+         out = out.permute(0, 2, 3, 1).contiguous()
+
+         return out.view(out.shape[0], -1, 2)
+
+ class BboxHead(nn.Module):
+     def __init__(self, inchannels=512, num_anchors=3):
+         super(BboxHead, self).__init__()
+         self.conv1x1 = nn.Conv2d(inchannels, num_anchors*4, kernel_size=(1, 1), stride=1, padding=0)
+
+     def forward(self, x):
+         out = self.conv1x1(x)
+         out = out.permute(0, 2, 3, 1).contiguous()
+
+         return out.view(out.shape[0], -1, 4)
+
+ class LandmarkHead(nn.Module):
+     def __init__(self, inchannels=512, num_anchors=3):
+         super(LandmarkHead, self).__init__()
+         self.conv1x1 = nn.Conv2d(inchannels, num_anchors*10, kernel_size=(1, 1), stride=1, padding=0)
+
+     def forward(self, x):
+         out = self.conv1x1(x)
+         out = out.permute(0, 2, 3, 1).contiguous()
+
+         return out.view(out.shape[0], -1, 10)
+
+ class RetinaFace(nn.Module):
+     def __init__(self, cfg=None, phase='train'):
+         """
+         :param cfg:  Network related settings.
+         :param phase: train or test.
+         """
+         super(RetinaFace, self).__init__()
+         self.phase = phase
+         backbone = None
+         if cfg['name'] == 'mobilenet0.25':
+             backbone = MobileNetV1()
+             if cfg['pretrain']:
+                 checkpoint = torch.load("./Retinaface_model_v2/mobilenetV1X0.25_pretrain.tar", map_location=torch.device('cpu'))
+                 new_state_dict = OrderedDict()
+                 for k, v in checkpoint['state_dict'].items():
+                     name = k[7:]  # remove module.
+                     new_state_dict[name] = v
+                 # load params
+                 backbone.load_state_dict(new_state_dict)
+         elif cfg['name'] == 'Resnet50':
+             import torchvision.models as models
+             backbone = models.resnet50(pretrained=cfg['pretrain'])
+
+         self.body = _utils.IntermediateLayerGetter(backbone, cfg['return_layers'])
+         in_channels_stage2 = cfg['in_channel']
+         in_channels_list = [
+             in_channels_stage2 * 2,
+             in_channels_stage2 * 4,
+             in_channels_stage2 * 8,
+         ]
+         out_channels = cfg['out_channel']
+         self.fpn = FPN(in_channels_list, out_channels)
+         self.ssh1 = SSH(out_channels, out_channels)
+         self.ssh2 = SSH(out_channels, out_channels)
+         self.ssh3 = SSH(out_channels, out_channels)
+
+         self.ClassHead = self._make_class_head(fpn_num=3, inchannels=cfg['out_channel'])
+         self.BboxHead = self._make_bbox_head(fpn_num=3, inchannels=cfg['out_channel'])
+         self.LandmarkHead = self._make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])
+
+     def _make_class_head(self, fpn_num=3, inchannels=64, anchor_num=2):
+         classhead = nn.ModuleList()
+         for i in range(fpn_num):
+             classhead.append(ClassHead(inchannels, anchor_num))
+         return classhead
+
+     def _make_bbox_head(self, fpn_num=3, inchannels=64, anchor_num=2):
+         bboxhead = nn.ModuleList()
+         for i in range(fpn_num):
+             bboxhead.append(BboxHead(inchannels, anchor_num))
+         return bboxhead
+
+     def _make_landmark_head(self, fpn_num=3, inchannels=64, anchor_num=2):
+         landmarkhead = nn.ModuleList()
+         for i in range(fpn_num):
+             landmarkhead.append(LandmarkHead(inchannels, anchor_num))
+         return landmarkhead
+
+     def forward(self, inputs):
+         out = self.body(inputs)
+
+         # FPN
+         fpn = self.fpn(out)
+
+         # SSH
+         feature1 = self.ssh1(fpn[0])
+         feature2 = self.ssh2(fpn[1])
+         feature3 = self.ssh3(fpn[2])
+         features = [feature1, feature2, feature3]
+
+         bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1)
+         classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)], dim=1)
+         ldm_regressions = torch.cat([self.LandmarkHead[i](feature) for i, feature in enumerate(features)], dim=1)
+
+         if self.phase == 'train':
+             output = (bbox_regressions, classifications, ldm_regressions)
+         else:
+             output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)
+         return output
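
RetinaFace.__init__ reads only five cfg keys ('name', 'pretrain', 'return_layers', 'in_channel', 'out_channel'); the real cfg_mnet/cfg_re50 dictionaries live in data/config.py, which is not part of this commit. A hedged sketch with illustrative values:

```python
import torch
from models.retinaface import RetinaFace

cfg = {  # illustrative stand-in for cfg_mnet, not the real config
    'name': 'mobilenet0.25',
    'pretrain': False,  # True would load ./Retinaface_model_v2/mobilenetV1X0.25_pretrain.tar
    'return_layers': {'stage1': 1, 'stage2': 2, 'stage3': 3},
    'in_channel': 32,   # in_channels_list becomes [64, 128, 256]
    'out_channel': 64,
}
net = RetinaFace(cfg=cfg, phase='test').eval()
loc, conf, landms = net(torch.randn(1, 3, 640, 640))
# With 2 anchors per location on strides 8/16/32: N = (80^2 + 40^2 + 20^2) * 2 = 16800
print(loc.shape, conf.shape, landms.shape)  # (1, N, 4) (1, N, 2) (1, N, 10)
```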
requirements_cpu.txt ADDED
@@ -0,0 +1,6 @@
+ # Python 3.8
+ torch==1.10.1+cpu
+ torchvision==0.11.2+cpu
+ torchaudio==0.10.1
+ opencv-python==4.10.0.84
+ -f https://download.pytorch.org/whl/cpu/torch_stable.html
requirements_gpu ADDED
@@ -0,0 +1,6 @@
+ # Python 3.8
+ torch==1.10.1+cu111
+ torchvision==0.11.2+cu111
+ torchaudio==0.10.1
+ opencv-python==4.10.0.84
+ -f https://download.pytorch.org/whl/cu111/torch_stable.html
test_fddb.py ADDED
@@ -0,0 +1,207 @@
+ from __future__ import print_function
+ import os
+ import argparse
+ import torch
+ import torch.backends.cudnn as cudnn
+ import numpy as np
+ from data import cfg_mnet, cfg_re50
+ from layers.functions.prior_box import PriorBox
+ from utils.nms.py_cpu_nms import py_cpu_nms
+ import cv2
+ from models.retinaface import RetinaFace
+ from utils.box_utils import decode, decode_landm
+ from utils.timer import Timer
+
+ parser = argparse.ArgumentParser(description='Retinaface')
+
+ parser.add_argument('-m', '--trained_model', default='./weights/mobilenet0.25_Final.pth',
+                     type=str, help='Trained state_dict file path to open')
+ parser.add_argument('--network', default='mobile0.25', help='Backbone network mobile0.25 or resnet50')
+ parser.add_argument('--save_folder', default='eval/', type=str, help='Dir to save results')
+ parser.add_argument('--cpu', action="store_true", default=False, help='Use cpu inference')
+ parser.add_argument('--dataset', default='FDDB', type=str, choices=['FDDB'], help='dataset')
+ parser.add_argument('--confidence_threshold', default=0.02, type=float, help='confidence_threshold')
+ parser.add_argument('--top_k', default=5000, type=int, help='top_k')
+ parser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')
+ parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
+ parser.add_argument('-s', '--save_image', action="store_true", default=False, help='show detection results')
+ parser.add_argument('--vis_thres', default=0.5, type=float, help='visualization_threshold')
+ args = parser.parse_args()
+
+
+ def check_keys(model, pretrained_state_dict):
+     ckpt_keys = set(pretrained_state_dict.keys())
+     model_keys = set(model.state_dict().keys())
+     used_pretrained_keys = model_keys & ckpt_keys
+     unused_pretrained_keys = ckpt_keys - model_keys
+     missing_keys = model_keys - ckpt_keys
+     print('Missing keys:{}'.format(len(missing_keys)))
+     print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
+     print('Used keys:{}'.format(len(used_pretrained_keys)))
+     assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
+     return True
+
+
+ def remove_prefix(state_dict, prefix):
+     ''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
+     print('remove prefix \'{}\''.format(prefix))
+     f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
+     return {f(key): value for key, value in state_dict.items()}
+
+
+ def load_model(model, pretrained_path, load_to_cpu):
+     print('Loading pretrained model from {}'.format(pretrained_path))
+     if load_to_cpu:
+         pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
+     else:
+         device = torch.cuda.current_device()
+         pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
+     if "state_dict" in pretrained_dict.keys():
+         pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
+     else:
+         pretrained_dict = remove_prefix(pretrained_dict, 'module.')
+     check_keys(model, pretrained_dict)
+     model.load_state_dict(pretrained_dict, strict=False)
+     return model
+
+
+ if __name__ == '__main__':
+     torch.set_grad_enabled(False)
+     cfg = None
+     if args.network == "mobile0.25":
+         cfg = cfg_mnet
+     elif args.network == "resnet50":
+         cfg = cfg_re50
+     # net and model
+     net = RetinaFace(cfg=cfg, phase='test')
+     net = load_model(net, args.trained_model, args.cpu)
+     net.eval()
+     print('Finished loading model!')
+     print(net)
+     cudnn.benchmark = True
+     device = torch.device("cpu" if args.cpu else "cuda")
+     net = net.to(device)
+
+
+     # save file
+     if not os.path.exists(args.save_folder):
+         os.makedirs(args.save_folder)
+     fw = open(os.path.join(args.save_folder, args.dataset + '_dets.txt'), 'w')
+
+     # testing dataset
+     testset_folder = os.path.join('data', args.dataset, 'images/')
+     testset_list = os.path.join('data', args.dataset, 'img_list.txt')
+     with open(testset_list, 'r') as fr:
+         test_dataset = fr.read().split()
+     num_images = len(test_dataset)
+
+     # testing scale
+     resize = 1
+
+     _t = {'forward_pass': Timer(), 'misc': Timer()}
+
+     # testing begin
+     for i, img_name in enumerate(test_dataset):
+         image_path = testset_folder + img_name + '.jpg'
+         img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
+
+         img = np.float32(img_raw)
+         if resize != 1:
+             img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
+         im_height, im_width, _ = img.shape
+         scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
+         img -= (104, 117, 123)
+         img = img.transpose(2, 0, 1)
+         img = torch.from_numpy(img).unsqueeze(0)
+         img = img.to(device)
+         scale = scale.to(device)
+
+         _t['forward_pass'].tic()
+         loc, conf, landms = net(img)  # forward pass
+         _t['forward_pass'].toc()
+         _t['misc'].tic()
+         priorbox = PriorBox(cfg, image_size=(im_height, im_width))
+         priors = priorbox.forward()
+         priors = priors.to(device)
+         prior_data = priors.data
+         boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
+         boxes = boxes * scale / resize
+         boxes = boxes.cpu().numpy()
+         scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
+         landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])
+         scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
+                                img.shape[3], img.shape[2], img.shape[3], img.shape[2],
+                                img.shape[3], img.shape[2]])
+         scale1 = scale1.to(device)
+         landms = landms * scale1 / resize
+         landms = landms.cpu().numpy()
+
+         # ignore low scores
+         inds = np.where(scores > args.confidence_threshold)[0]
+         boxes = boxes[inds]
+         landms = landms[inds]
+         scores = scores[inds]
+
+         # keep top-K before NMS
+         # order = scores.argsort()[::-1][:args.top_k]
+         order = scores.argsort()[::-1]
+         boxes = boxes[order]
+         landms = landms[order]
+         scores = scores[order]
+
+         # do NMS
+         dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
+         keep = py_cpu_nms(dets, args.nms_threshold)
+
+         dets = dets[keep, :]
+         landms = landms[keep]
+
+         # keep top-K faster NMS
+         # dets = dets[:args.keep_top_k, :]
+         # landms = landms[:args.keep_top_k, :]
+
+         dets = np.concatenate((dets, landms), axis=1)
+         _t['misc'].toc()
+
+         # save dets
+         if args.dataset == "FDDB":
+             fw.write('{:s}\n'.format(img_name))
+             fw.write('{:.1f}\n'.format(dets.shape[0]))
+             for k in range(dets.shape[0]):
+                 xmin = dets[k, 0]
+                 ymin = dets[k, 1]
+                 xmax = dets[k, 2]
+                 ymax = dets[k, 3]
+                 score = dets[k, 4]
+                 w = xmax - xmin + 1
+                 h = ymax - ymin + 1
+                 # fw.write('{:.3f} {:.3f} {:.3f} {:.3f} {:.10f}\n'.format(xmin, ymin, w, h, score))
+                 fw.write('{:d} {:d} {:d} {:d} {:.10f}\n'.format(int(xmin), int(ymin), int(w), int(h), score))
+         print('im_detect: {:d}/{:d} forward_pass_time: {:.4f}s misc: {:.4f}s'.format(i + 1, num_images, _t['forward_pass'].average_time, _t['misc'].average_time))
+
+         # show image
+         if args.save_image:
+             for b in dets:
+                 if b[4] < args.vis_thres:
+                     continue
+                 text = "{:.4f}".format(b[4])
+                 b = list(map(int, b))
+                 cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
+                 cx = b[0]
+                 cy = b[1] + 12
+                 cv2.putText(img_raw, text, (cx, cy),
+                             cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
+
+                 # landms
+                 cv2.circle(img_raw, (b[5], b[6]), 1, (0, 0, 255), 4)
+                 cv2.circle(img_raw, (b[7], b[8]), 1, (0, 255, 255), 4)
+                 cv2.circle(img_raw, (b[9], b[10]), 1, (255, 0, 255), 4)
+                 cv2.circle(img_raw, (b[11], b[12]), 1, (0, 255, 0), 4)
+                 cv2.circle(img_raw, (b[13], b[14]), 1, (255, 0, 0), 4)
+             # save image
+             if not os.path.exists("./results/"):
+                 os.makedirs("./results/")
+             name = "./results/" + str(i) + ".jpg"
+             cv2.imwrite(name, img_raw)
+
+     fw.close()
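
Each image contributes one block to `<save_folder>/FDDB_dets.txt`: the image name, the face count (written as a float by the `{:.1f}` format above), then one `xmin ymin width height score` line per detection. An illustrative, made-up block:

```
2002/08/11/big/img_591
2.0
78 92 58 74 0.9987654321
212 88 55 71 0.8123456789
```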
test_widerface.py ADDED
@@ -0,0 +1,219 @@
+ from __future__ import print_function
+ import os
+ import argparse
+ import torch
+ import torch.backends.cudnn as cudnn
+ import numpy as np
+ from data import cfg_mnet, cfg_re50
+ from layers.functions.prior_box import PriorBox
+ from utils.nms.py_cpu_nms import py_cpu_nms
+ import cv2
+ from models.retinaface import RetinaFace
+ from utils.box_utils import decode, decode_landm
+ from utils.timer import Timer
+
+
+ parser = argparse.ArgumentParser(description='Retinaface')
+ parser.add_argument('-m', '--trained_model', default='./weights/Resnet50_Final.pth',
+                     type=str, help='Trained state_dict file path to open')
+ parser.add_argument('--network', default='resnet50', help='Backbone network mobile0.25 or resnet50')
+ parser.add_argument('--origin_size', default=True, type=str, help='Whether use origin image size to evaluate')
+ parser.add_argument('--save_folder', default='./widerface_evaluate/widerface_txt/', type=str, help='Dir to save txt results')
+ parser.add_argument('--cpu', action="store_true", default=False, help='Use cpu inference')
+ parser.add_argument('--dataset_folder', default='./data/widerface/val/images/', type=str, help='dataset path')
+ parser.add_argument('--confidence_threshold', default=0.02, type=float, help='confidence_threshold')
+ parser.add_argument('--top_k', default=5000, type=int, help='top_k')
+ parser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')
+ parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
+ parser.add_argument('-s', '--save_image', action="store_true", default=False, help='show detection results')
+ parser.add_argument('--vis_thres', default=0.5, type=float, help='visualization_threshold')
+ args = parser.parse_args()
+
+
+ def check_keys(model, pretrained_state_dict):
+     ckpt_keys = set(pretrained_state_dict.keys())
+     model_keys = set(model.state_dict().keys())
+     used_pretrained_keys = model_keys & ckpt_keys
+     unused_pretrained_keys = ckpt_keys - model_keys
+     missing_keys = model_keys - ckpt_keys
+     print('Missing keys:{}'.format(len(missing_keys)))
+     print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
+     print('Used keys:{}'.format(len(used_pretrained_keys)))
+     assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
+     return True
+
+
+ def remove_prefix(state_dict, prefix):
+     ''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
+     print('remove prefix \'{}\''.format(prefix))
+     f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
+     return {f(key): value for key, value in state_dict.items()}
+
+
+ def load_model(model, pretrained_path, load_to_cpu):
+     print('Loading pretrained model from {}'.format(pretrained_path))
+     if load_to_cpu:
+         pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
+     else:
+         device = torch.cuda.current_device()
+         pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
+     if "state_dict" in pretrained_dict.keys():
+         pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
+     else:
+         pretrained_dict = remove_prefix(pretrained_dict, 'module.')
+     check_keys(model, pretrained_dict)
+     model.load_state_dict(pretrained_dict, strict=False)
+     return model
+
+
+ if __name__ == '__main__':
+     torch.set_grad_enabled(False)
+
+     cfg = None
+     if args.network == "mobile0.25":
+         cfg = cfg_mnet
+     elif args.network == "resnet50":
+         cfg = cfg_re50
+     # net and model
+     net = RetinaFace(cfg=cfg, phase='test')
+     net = load_model(net, args.trained_model, args.cpu)
+     net.eval()
+     print('Finished loading model!')
+     print(net)
+     cudnn.benchmark = True
+     device = torch.device("cpu" if args.cpu else "cuda")
+     net = net.to(device)
+
+     # testing dataset
+     testset_folder = args.dataset_folder
+     testset_list = args.dataset_folder[:-7] + "wider_val.txt"
+
+     with open(testset_list, 'r') as fr:
+         test_dataset = fr.read().split()
+     num_images = len(test_dataset)
+
+     _t = {'forward_pass': Timer(), 'misc': Timer()}
+
+     # testing begin
+     for i, img_name in enumerate(test_dataset):
+         image_path = testset_folder + img_name
+         img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
+         img = np.float32(img_raw)
+
+         # testing scale
+         target_size = 1600
+         max_size = 2150
+         im_shape = img.shape
+         im_size_min = np.min(im_shape[0:2])
+         im_size_max = np.max(im_shape[0:2])
+         resize = float(target_size) / float(im_size_min)
+         # prevent bigger axis from being more than max_size:
+         if np.round(resize * im_size_max) > max_size:
+             resize = float(max_size) / float(im_size_max)
+         if args.origin_size:
+             resize = 1
+
+         if resize != 1:
+             img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
+         im_height, im_width, _ = img.shape
+         scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
+         img -= (104, 117, 123)
+         img = img.transpose(2, 0, 1)
+         img = torch.from_numpy(img).unsqueeze(0)
+         img = img.to(device)
+         scale = scale.to(device)
+
+         _t['forward_pass'].tic()
+         loc, conf, landms = net(img)  # forward pass
+         _t['forward_pass'].toc()
+         _t['misc'].tic()
+         priorbox = PriorBox(cfg, image_size=(im_height, im_width))
+         priors = priorbox.forward()
+         priors = priors.to(device)
+         prior_data = priors.data
+         boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
+         boxes = boxes * scale / resize
+         boxes = boxes.cpu().numpy()
+         scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
+         landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])
+         scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
+                                img.shape[3], img.shape[2], img.shape[3], img.shape[2],
+                                img.shape[3], img.shape[2]])
+         scale1 = scale1.to(device)
+         landms = landms * scale1 / resize
+         landms = landms.cpu().numpy()
+
+         # ignore low scores
+         inds = np.where(scores > args.confidence_threshold)[0]
+         boxes = boxes[inds]
+         landms = landms[inds]
+         scores = scores[inds]
+
+         # keep top-K before NMS
+         order = scores.argsort()[::-1]
+         # order = scores.argsort()[::-1][:args.top_k]
+         boxes = boxes[order]
+         landms = landms[order]
+         scores = scores[order]
+
+         # do NMS
+         dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
+         keep = py_cpu_nms(dets, args.nms_threshold)
+         # keep = nms(dets, args.nms_threshold, force_cpu=args.cpu)
+         dets = dets[keep, :]
+         landms = landms[keep]
+
+         # keep top-K faster NMS
+         # dets = dets[:args.keep_top_k, :]
+         # landms = landms[:args.keep_top_k, :]
+
+         dets = np.concatenate((dets, landms), axis=1)
+         _t['misc'].toc()
+
+         # --------------------------------------------------------------------
+         save_name = args.save_folder + img_name[:-4] + ".txt"
+         dirname = os.path.dirname(save_name)
+         if not os.path.isdir(dirname):
+             os.makedirs(dirname)
+         with open(save_name, "w") as fd:
+             bboxs = dets
+             file_name = os.path.basename(save_name)[:-4] + "\n"
+             bboxs_num = str(len(bboxs)) + "\n"
+             fd.write(file_name)
+             fd.write(bboxs_num)
+             for box in bboxs:
+                 x = int(box[0])
+                 y = int(box[1])
+                 w = int(box[2]) - int(box[0])
+                 h = int(box[3]) - int(box[1])
+                 confidence = str(box[4])
+                 line = str(x) + " " + str(y) + " " + str(w) + " " + str(h) + " " + confidence + " \n"
+                 fd.write(line)
+
+         print('im_detect: {:d}/{:d} forward_pass_time: {:.4f}s misc: {:.4f}s'.format(i + 1, num_images, _t['forward_pass'].average_time, _t['misc'].average_time))
+
+         # save image
+         if args.save_image:
+             for b in dets:
+                 if b[4] < args.vis_thres:
+                     continue
+                 text = "{:.4f}".format(b[4])
+                 b = list(map(int, b))
+                 cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
+                 cx = b[0]
+                 cy = b[1] + 12
+                 cv2.putText(img_raw, text, (cx, cy),
+                             cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
+
+                 # landms
+                 cv2.circle(img_raw, (b[5], b[6]), 1, (0, 0, 255), 4)
+                 cv2.circle(img_raw, (b[7], b[8]), 1, (0, 255, 255), 4)
+                 cv2.circle(img_raw, (b[9], b[10]), 1, (255, 0, 255), 4)
+                 cv2.circle(img_raw, (b[11], b[12]), 1, (0, 255, 0), 4)
+                 cv2.circle(img_raw, (b[13], b[14]), 1, (255, 0, 0), 4)
+             # save image
+             if not os.path.exists("./results/"):
+                 os.makedirs("./results/")
+             name = "./results/" + str(i) + ".jpg"
+             cv2.imwrite(name, img_raw)
+
+
train.py ADDED
@@ -0,0 +1,160 @@
+ from __future__ import print_function
+ import os
+ import torch
+ import torch.optim as optim
+ import torch.backends.cudnn as cudnn
+ import argparse
+ import torch.utils.data as data
+ from data import WiderFaceDetection, detection_collate, preproc, cfg_mnet, cfg_re50
+ from layers.modules import MultiBoxLoss
+ from layers.functions.prior_box import PriorBox
+ import time
+ import datetime
+ import math
+ from models.retinaface import RetinaFace
+
+ parser = argparse.ArgumentParser(description='Retinaface Training')
+ parser.add_argument('--training_dataset', default='./dataset/widerface/widerface/train/label.txt', help='Training dataset directory')
+ parser.add_argument('--network', default='mobile0.25', help='Backbone network mobile0.25 or resnet50')
+ parser.add_argument('--num_workers', default=4, type=int, help='Number of workers used in dataloading')
+ parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float, help='initial learning rate')
+ parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
+ parser.add_argument('--resume_net', default=None, help='resume net for retraining')
+ parser.add_argument('--resume_epoch', default=0, type=int, help='resume iter for retraining')
+ parser.add_argument('--weight_decay', default=5e-4, type=float, help='Weight decay for SGD')
+ parser.add_argument('--gamma', default=0.1, type=float, help='Gamma update for SGD')
+ parser.add_argument('--save_folder', default='./weights/', help='Location to save checkpoint models')
+
+ args = parser.parse_args()
+
+ if not os.path.exists(args.save_folder):
+     os.mkdir(args.save_folder)
+ cfg = None
+ if args.network == "mobile0.25":
+     cfg = cfg_mnet
+ elif args.network == "resnet50":
+     cfg = cfg_re50
+
+ rgb_mean = (104, 117, 123)  # bgr order
+ num_classes = 2
+ img_dim = cfg['image_size']
+ num_gpu = cfg['ngpu']
+ batch_size = cfg['batch_size']
+ max_epoch = cfg['epoch']
+ gpu_train = cfg['gpu_train']
+
+ num_workers = args.num_workers
+ momentum = args.momentum
+ weight_decay = args.weight_decay
+ initial_lr = args.lr
+ gamma = args.gamma
+ training_dataset = args.training_dataset
+ save_folder = args.save_folder
+
+ net = RetinaFace(cfg=cfg)
+ print("Printing net...")
+ print(net)
+
+ if args.resume_net is not None:
+     print('Loading resume network...')
+     state_dict = torch.load(args.resume_net)
+     # create new OrderedDict that does not contain `module.`
+     from collections import OrderedDict
+     new_state_dict = OrderedDict()
+     for k, v in state_dict.items():
+         head = k[:7]
+         if head == 'module.':
+             name = k[7:]  # remove `module.`
+         else:
+             name = k
+         new_state_dict[name] = v
+     net.load_state_dict(new_state_dict)
+
+ if num_gpu > 1 and gpu_train:
+     net = torch.nn.DataParallel(net).cuda()
+ else:
+     net = net.cuda()
+
+ cudnn.benchmark = True
+
+
+ optimizer = optim.SGD(net.parameters(), lr=initial_lr, momentum=momentum, weight_decay=weight_decay)
+ criterion = MultiBoxLoss(num_classes, 0.35, True, 0, True, 7, 0.35, False)
+
+ priorbox = PriorBox(cfg, image_size=(img_dim, img_dim))
+ with torch.no_grad():
+     priors = priorbox.forward()
+     priors = priors.cuda()
+
+ def train():
+     net.train()
+     epoch = 0 + args.resume_epoch
+     print('Loading Dataset...')
+
+     dataset = WiderFaceDetection(training_dataset, preproc(img_dim, rgb_mean))
+
+     epoch_size = math.ceil(len(dataset) / batch_size)
+     max_iter = max_epoch * epoch_size
+
+     stepvalues = (cfg['decay1'] * epoch_size, cfg['decay2'] * epoch_size)
+     step_index = 0
+
+     if args.resume_epoch > 0:
+         start_iter = args.resume_epoch * epoch_size
+     else:
+         start_iter = 0
+
+     for iteration in range(start_iter, max_iter):
+         if iteration % epoch_size == 0:
+             # create batch iterator
+             batch_iterator = iter(data.DataLoader(dataset, batch_size, shuffle=True, num_workers=num_workers, collate_fn=detection_collate))
+             if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0 and epoch > cfg['decay1']):
+                 torch.save(net.state_dict(), save_folder + cfg['name'] + '_epoch_' + str(epoch) + '.pth')
+             epoch += 1
+
+         load_t0 = time.time()
+         if iteration in stepvalues:
+             step_index += 1
+         lr = adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size)
+
+         # load train data
+         images, targets = next(batch_iterator)
+         images = images.cuda()
+         targets = [anno.cuda() for anno in targets]
+
+         # forward
+         out = net(images)
+
+         # backprop
+         optimizer.zero_grad()
+         loss_l, loss_c, loss_landm = criterion(out, priors, targets)
+         loss = cfg['loc_weight'] * loss_l + loss_c + loss_landm
+         loss.backward()
+         optimizer.step()
+         load_t1 = time.time()
+         batch_time = load_t1 - load_t0
+         eta = int(batch_time * (max_iter - iteration))
+         print('Epoch:{}/{} || Epochiter: {}/{} || Iter: {}/{} || Loc: {:.4f} Cla: {:.4f} Landm: {:.4f} || LR: {:.8f} || Batchtime: {:.4f} s || ETA: {}'
+               .format(epoch, max_epoch, (iteration % epoch_size) + 1,
+               epoch_size, iteration + 1, max_iter, loss_l.item(), loss_c.item(), loss_landm.item(), lr, batch_time, str(datetime.timedelta(seconds=eta))))
+
+     torch.save(net.state_dict(), save_folder + cfg['name'] + '_Final.pth')
+     # torch.save(net.state_dict(), save_folder + 'Final_Retinaface.pth')
+
+
+ def adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):
+     """Sets the learning rate
+     # Adapted from PyTorch Imagenet example:
+     # https://github.com/pytorch/examples/blob/master/imagenet/main.py
+     """
+     warmup_epoch = -1
+     if epoch <= warmup_epoch:
+         lr = 1e-6 + (initial_lr - 1e-6) * iteration / (epoch_size * warmup_epoch)
+     else:
+         lr = initial_lr * (gamma ** (step_index))
+     for param_group in optimizer.param_groups:
+         param_group['lr'] = lr
+     return lr
+
+ if __name__ == '__main__':
+     train()
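
With warmup_epoch = -1 the warmup branch above is dead code, so adjust_learning_rate reduces to a step schedule: the LR is multiplied by gamma each time training passes a decay milestone. A sketch, assuming illustrative milestones for cfg['decay1'] and cfg['decay2'] (the real values live in data/config.py, not in this commit):

```python
initial_lr, gamma = 1e-3, 0.1
decay1, decay2 = 190, 220  # assumed cfg['decay1'], cfg['decay2']

def lr_at_epoch(epoch):
    step_index = int(epoch >= decay1) + int(epoch >= decay2)
    return initial_lr * gamma ** step_index

for e in (1, 100, 200, 240):
    print(e, lr_at_epoch(e))  # 0.001, 0.001, 0.0001, 1e-05
```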
utils/__init__.py ADDED
File without changes
utils/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (148 Bytes)
 
utils/__pycache__/box_utils.cpython-38.pyc ADDED
Binary file (11.4 kB)
 
utils/box_utils.py ADDED
@@ -0,0 +1,330 @@
+ import torch
+ import numpy as np
+
+
+ def point_form(boxes):
+     """ Convert prior_boxes to (xmin, ymin, xmax, ymax)
+     representation for comparison to point form ground truth data.
+     Args:
+         boxes: (tensor) center-size default boxes from priorbox layers.
+     Return:
+         boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
+     """
+     return torch.cat((boxes[:, :2] - boxes[:, 2:]/2,     # xmin, ymin
+                       boxes[:, :2] + boxes[:, 2:]/2), 1)  # xmax, ymax
+
+
+ def center_size(boxes):
+     """ Convert prior_boxes to (cx, cy, w, h)
+     representation for comparison to center-size form ground truth data.
+     Args:
+         boxes: (tensor) point_form boxes
+     Return:
+         boxes: (tensor) Converted cx, cy, w, h form of boxes.
+     """
+     return torch.cat(((boxes[:, 2:] + boxes[:, :2])/2,  # cx, cy
+                       boxes[:, 2:] - boxes[:, :2]), 1)  # w, h
+
+
+ def intersect(box_a, box_b):
+     """ We resize both tensors to [A,B,2] without new malloc:
+     [A,2] -> [A,1,2] -> [A,B,2]
+     [B,2] -> [1,B,2] -> [A,B,2]
+     Then we compute the area of intersect between box_a and box_b.
+     Args:
+         box_a: (tensor) bounding boxes, Shape: [A,4].
+         box_b: (tensor) bounding boxes, Shape: [B,4].
+     Return:
+         (tensor) intersection area, Shape: [A,B].
+     """
+     A = box_a.size(0)
+     B = box_b.size(0)
+     max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
+                        box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
+     min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
+                        box_b[:, :2].unsqueeze(0).expand(A, B, 2))
+     inter = torch.clamp((max_xy - min_xy), min=0)
+     return inter[:, :, 0] * inter[:, :, 1]
+
+
+ def jaccard(box_a, box_b):
+     """Compute the jaccard overlap of two sets of boxes. The jaccard overlap
+     is simply the intersection over union of two boxes. Here we operate on
+     ground truth boxes and default boxes.
+     E.g.:
+         A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
+     Args:
+         box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
+         box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
+     Return:
+         jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
+     """
+     inter = intersect(box_a, box_b)
+     area_a = ((box_a[:, 2]-box_a[:, 0]) *
+               (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter)  # [A,B]
+     area_b = ((box_b[:, 2]-box_b[:, 0]) *
+               (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter)  # [A,B]
+     union = area_a + area_b - inter
+     return inter / union  # [A,B]
+
+
+ def matrix_iou(a, b):
+     """
+     return iou of a and b, numpy version for data augmentation
+     """
+     lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
+     rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
+
+     area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
+     area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
+     area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
+     return area_i / (area_a[:, np.newaxis] + area_b - area_i)
+
+
+ def matrix_iof(a, b):
+     """
+     return iof of a and b, numpy version for data augmentation
+     """
+     lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
+     rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
+
+     area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
+     area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
+     return area_i / np.maximum(area_a[:, np.newaxis], 1)
+
+
+ def match(threshold, truths, priors, variances, labels, landms, loc_t, conf_t, landm_t, idx):
+     """Match each prior box with the ground truth box of the highest jaccard
+     overlap, encode the bounding boxes, then return the matched indices
+     corresponding to both confidence and location preds.
+     Args:
+         threshold: (float) The overlap threshold used when matching boxes.
+         truths: (tensor) Ground truth boxes, Shape: [num_obj, 4].
+         priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
+         variances: (tensor) Variances corresponding to each prior coord,
+             Shape: [num_priors, 4].
+         labels: (tensor) All the class labels for the image, Shape: [num_obj].
+         landms: (tensor) Ground truth landms, Shape [num_obj, 10].
+         loc_t: (tensor) Tensor to be filled w/ encoded location targets.
+         conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
+         landm_t: (tensor) Tensor to be filled w/ encoded landm targets.
+         idx: (int) current batch index
+     Return:
+         The matched indices corresponding to 1)location 2)confidence 3)landm preds.
+     """
+     # jaccard index
+     overlaps = jaccard(
+         truths,
+         point_form(priors)
+     )
+     # (Bipartite Matching)
+     # [1,num_objects] best prior for each ground truth
+     best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)
+
+     # ignore hard gt
+     valid_gt_idx = best_prior_overlap[:, 0] >= 0.2
+     best_prior_idx_filter = best_prior_idx[valid_gt_idx, :]
+     if best_prior_idx_filter.shape[0] <= 0:
+         loc_t[idx] = 0
+         conf_t[idx] = 0
+         return
+
+     # [1,num_priors] best ground truth for each prior
+     best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)
+     best_truth_idx.squeeze_(0)
+     best_truth_overlap.squeeze_(0)
+     best_prior_idx.squeeze_(1)
+     best_prior_idx_filter.squeeze_(1)
+     best_prior_overlap.squeeze_(1)
+     best_truth_overlap.index_fill_(0, best_prior_idx_filter, 2)  # ensure best prior
+     # TODO refactor: index best_prior_idx with long tensor
+     # ensure every gt matches with its prior of max overlap
+     for j in range(best_prior_idx.size(0)):  # for each gt, force-assign its best-matching prior
+         best_truth_idx[best_prior_idx[j]] = j
+     matches = truths[best_truth_idx]          # Shape: [num_priors,4] -- the matched bbox for every prior
+     conf = labels[best_truth_idx]             # Shape: [num_priors] -- the matched label for every prior
+     conf[best_truth_overlap < threshold] = 0  # label as background: overlap < threshold becomes a negative sample
+     loc = encode(matches, priors, variances)
+
+     matches_landm = landms[best_truth_idx]
+     landm = encode_landm(matches_landm, priors, variances)
+     loc_t[idx] = loc    # [num_priors,4] encoded offsets to learn
+     conf_t[idx] = conf  # [num_priors] top class label for each prior
+     landm_t[idx] = landm
+
+
+ def encode(matched, priors, variances):
+     """Encode the variances from the priorbox layers into the ground truth boxes
+     we have matched (based on jaccard overlap) with the prior boxes.
+     Args:
+         matched: (tensor) Coords of ground truth for each prior in point-form
+             Shape: [num_priors, 4].
+         priors: (tensor) Prior boxes in center-offset form
+             Shape: [num_priors,4].
+         variances: (list[float]) Variances of priorboxes
+     Return:
+         encoded boxes (tensor), Shape: [num_priors, 4]
+     """
+
+     # dist b/t match center and prior's center
+     g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
+     # encode variance
+     g_cxcy /= (variances[0] * priors[:, 2:])
+     # match wh / prior wh
+     g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
+     g_wh = torch.log(g_wh) / variances[1]
+     # return target for smooth_l1_loss
+     return torch.cat([g_cxcy, g_wh], 1)  # [num_priors,4]
+
+ def encode_landm(matched, priors, variances):
+     """Encode the variances from the priorbox layers into the ground truth
+     landmarks we have matched (based on jaccard overlap) with the prior boxes.
+     Args:
+         matched: (tensor) Coords of ground truth for each prior in point-form
+             Shape: [num_priors, 10].
+         priors: (tensor) Prior boxes in center-offset form
+             Shape: [num_priors,4].
+         variances: (list[float]) Variances of priorboxes
+     Return:
+         encoded landm (tensor), Shape: [num_priors, 10]
+     """
+
+     # dist b/t match center and prior's center
+     matched = torch.reshape(matched, (matched.size(0), 5, 2))
+     priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
+     priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
+     priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
+     priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
+     priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2)
+     g_cxcy = matched[:, :, :2] - priors[:, :, :2]
+     # encode variance
+     g_cxcy /= (variances[0] * priors[:, :, 2:])
+     # g_cxcy /= priors[:, :, 2:]
+     g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1)
+     # return target for smooth_l1_loss
+     return g_cxcy
+
+
+ # Adapted from https://github.com/Hakuyume/chainer-ssd
+ def decode(loc, priors, variances):
+     """Decode locations from predictions using priors to undo
+     the encoding we did for offset regression at train time.
+     Args:
+         loc (tensor): location predictions for loc layers,
+             Shape: [num_priors,4]
+         priors (tensor): Prior boxes in center-offset form.
+             Shape: [num_priors,4].
+         variances: (list[float]) Variances of priorboxes
+     Return:
+         decoded bounding box predictions
+     """
+
+     boxes = torch.cat((
+         priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
+         priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
+     boxes[:, :2] -= boxes[:, 2:] / 2
+     boxes[:, 2:] += boxes[:, :2]
+     return boxes
+
+ def decode_landm(pre, priors, variances):
+     """Decode landm from predictions using priors to undo
+     the encoding we did for offset regression at train time.
+     Args:
+         pre (tensor): landm predictions for loc layers,
+             Shape: [num_priors,10]
+         priors (tensor): Prior boxes in center-offset form.
+             Shape: [num_priors,4].
+         variances: (list[float]) Variances of priorboxes
+     Return:
+         decoded landm predictions
+     """
+     landms = torch.cat((priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
+                         priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
+                         priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
+                         priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
+                         priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],
+                         ), dim=1)
+     return landms
+
+
+ def log_sum_exp(x):
+     """Utility function for computing log_sum_exp.
+     This will be used to determine unaveraged confidence loss across
+     all examples in a batch.
+     Args:
+         x (Variable(tensor)): conf_preds from conf layers
+     """
+     x_max = x.data.max()
+     return torch.log(torch.sum(torch.exp(x-x_max), 1, keepdim=True)) + x_max
+
+
+ # Original author: Francisco Massa:
+ # https://github.com/fmassa/object-detection.torch
+ # Ported to PyTorch by Max deGroot (02/01/2017)
+ def nms(boxes, scores, overlap=0.5, top_k=200):
+     """Apply non-maximum suppression at test time to avoid detecting too many
+     overlapping bounding boxes for a given object.
+     Args:
+         boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
+         scores: (tensor) The class pred scores for the img, Shape:[num_priors].
+         overlap: (float) The overlap thresh for suppressing unnecessary boxes.
+         top_k: (int) The maximum number of box preds to consider.
+     Return:
+         The indices of the kept boxes with respect to num_priors.
+     """
+
+     keep = torch.Tensor(scores.size(0)).fill_(0).long()
+     if boxes.numel() == 0:
+         return keep
+     x1 = boxes[:, 0]
+     y1 = boxes[:, 1]
+     x2 = boxes[:, 2]
+     y2 = boxes[:, 3]
+     area = torch.mul(x2 - x1, y2 - y1)
+     v, idx = scores.sort(0)  # sort in ascending order
+     # I = I[v >= 0.01]
+     idx = idx[-top_k:]  # indices of the top-k largest vals
+     xx1 = boxes.new()
+     yy1 = boxes.new()
+     xx2 = boxes.new()
+     yy2 = boxes.new()
+     w = boxes.new()
+     h = boxes.new()
+
+     # keep = torch.Tensor()
+     count = 0
+     while idx.numel() > 0:
+         i = idx[-1]  # index of current largest val
+         # keep.append(i)
+         keep[count] = i
+         count += 1
+         if idx.size(0) == 1:
+             break
+         idx = idx[:-1]  # remove kept element from view
+         # load bboxes of next highest vals
+         torch.index_select(x1, 0, idx, out=xx1)
+         torch.index_select(y1, 0, idx, out=yy1)
+         torch.index_select(x2, 0, idx, out=xx2)
+         torch.index_select(y2, 0, idx, out=yy2)
+         # store element-wise max with next highest score
+         xx1 = torch.clamp(xx1, min=x1[i])
+         yy1 = torch.clamp(yy1, min=y1[i])
+         xx2 = torch.clamp(xx2, max=x2[i])
+         yy2 = torch.clamp(yy2, max=y2[i])
+         w.resize_as_(xx2)
+         h.resize_as_(yy2)
+         w = xx2 - xx1
+         h = yy2 - yy1
+         # check sizes of xx1 and xx2.. after each iteration
+         w = torch.clamp(w, min=0.0)
+         h = torch.clamp(h, min=0.0)
+         inter = w*h
+         # IoU = i / (area(a) + area(b) - i)
+         rem_areas = torch.index_select(area, 0, idx)  # load remaining areas
+         union = (rem_areas - inter) + area[i]
+         IoU = inter/union  # store result in iou
+         # keep only elements with an IoU <= overlap
+         idx = idx[IoU.le(overlap)]
+     return keep, count
+
+
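
encode and decode above are exact inverses, which a single prior suffices to sanity-check; the variances [0.1, 0.2] below are the values commonly paired with RetinaFace and are an assumption here:

```python
import torch
from utils.box_utils import encode, decode

priors = torch.tensor([[0.50, 0.50, 0.20, 0.20]])  # (cx, cy, w, h), normalized
truth = torch.tensor([[0.45, 0.45, 0.65, 0.70]])   # (xmin, ymin, xmax, ymax)
variances = [0.1, 0.2]

out = decode(encode(truth, priors, variances), priors, variances)
print(out)  # tensor([[0.4500, 0.4500, 0.6500, 0.7000]]) -- the round trip recovers the box
```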
utils/nms/__init__.py ADDED
File without changes
utils/nms/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (152 Bytes)
 
utils/nms/__pycache__/py_cpu_nms.cpython-38.pyc ADDED
Binary file (920 Bytes)
 
utils/nms/py_cpu_nms.py ADDED
@@ -0,0 +1,38 @@
+ # --------------------------------------------------------
+ # Fast R-CNN
+ # Copyright (c) 2015 Microsoft
+ # Licensed under The MIT License [see LICENSE for details]
+ # Written by Ross Girshick
+ # --------------------------------------------------------
+
+ import numpy as np
+
+ def py_cpu_nms(dets, thresh):
+     """Pure Python NMS baseline."""
+     x1 = dets[:, 0]
+     y1 = dets[:, 1]
+     x2 = dets[:, 2]
+     y2 = dets[:, 3]
+     scores = dets[:, 4]
+
+     areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+     order = scores.argsort()[::-1]
+
+     keep = []
+     while order.size > 0:
+         i = order[0]
+         keep.append(i)
+         xx1 = np.maximum(x1[i], x1[order[1:]])
+         yy1 = np.maximum(y1[i], y1[order[1:]])
+         xx2 = np.minimum(x2[i], x2[order[1:]])
+         yy2 = np.minimum(y2[i], y2[order[1:]])
+
+         w = np.maximum(0.0, xx2 - xx1 + 1)
+         h = np.maximum(0.0, yy2 - yy1 + 1)
+         inter = w * h
+         ovr = inter / (areas[i] + areas[order[1:]] - inter)
+
+         inds = np.where(ovr <= thresh)[0]
+         order = order[inds + 1]
+
+     return keep
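
A quick sanity check of py_cpu_nms: of three detections, the two heavily overlapping boxes collapse to the higher-scoring one (the coordinates below are illustrative):

```python
import numpy as np
from utils.nms.py_cpu_nms import py_cpu_nms

dets = np.array([
    [10, 10, 50, 50, 0.9],
    [12, 12, 52, 52, 0.8],    # IoU with the first box ≈ 0.83 -> suppressed
    [80, 80, 120, 120, 0.7],  # disjoint -> kept
], dtype=np.float32)
print(py_cpu_nms(dets, thresh=0.4))  # [0, 2]
```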
utils/timer.py ADDED
@@ -0,0 +1,40 @@
+ # --------------------------------------------------------
+ # Fast R-CNN
+ # Copyright (c) 2015 Microsoft
+ # Licensed under The MIT License [see LICENSE for details]
+ # Written by Ross Girshick
+ # --------------------------------------------------------
+
+ import time
+
+
+ class Timer(object):
+     """A simple timer."""
+     def __init__(self):
+         self.total_time = 0.
+         self.calls = 0
+         self.start_time = 0.
+         self.diff = 0.
+         self.average_time = 0.
+
+     def tic(self):
+         # using time.time instead of time.clock because time.clock
+         # does not normalize for multithreading
+         self.start_time = time.time()
+
+     def toc(self, average=True):
+         self.diff = time.time() - self.start_time
+         self.total_time += self.diff
+         self.calls += 1
+         self.average_time = self.total_time / self.calls
+         if average:
+             return self.average_time
+         else:
+             return self.diff
+
+     def clear(self):
+         self.total_time = 0.
+         self.calls = 0
+         self.start_time = 0.
+         self.diff = 0.
+         self.average_time = 0.
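
Timer is used in the test scripts above as paired tic()/toc() calls that accumulate a running average; a minimal usage sketch:

```python
import time
from utils.timer import Timer

t = Timer()
for _ in range(3):
    t.tic()
    time.sleep(0.01)  # stand-in for a forward pass
    t.toc()
print('{} calls, {:.4f}s average'.format(t.calls, t.average_time))
```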
widerface_evaluate/README.md ADDED
@@ -0,0 +1,27 @@
+ # WiderFace-Evaluation
+ Python Evaluation Code for [Wider Face Dataset](http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/)
+
+
+ ## Usage
+
+
+ ##### Before evaluating
+
+ ````
+ python3 setup.py build_ext --inplace
+ ````
+
+ ##### Evaluating
+
+ **Ground truth:** `wider_face_val.mat`, `wider_easy_val.mat`, `wider_medium_val.mat`, `wider_hard_val.mat`
+
+ ````
+ python3 evaluation.py -p <your prediction dir> -g <ground truth dir>
+ ````
+
+ ## Bugs & Problems
+ Please open an issue.
+
+ ## Acknowledgements
+
+ Some code borrowed from Sergey Karayev.
widerface_evaluate/box_overlaps.pyx ADDED
@@ -0,0 +1,55 @@
+ # --------------------------------------------------------
+ # Fast R-CNN
+ # Copyright (c) 2015 Microsoft
+ # Licensed under The MIT License [see LICENSE for details]
+ # Written by Sergey Karayev
+ # --------------------------------------------------------
+
+ cimport cython
+ import numpy as np
+ cimport numpy as np
+
+ DTYPE = np.float
+ ctypedef np.float_t DTYPE_t
+
+ def bbox_overlaps(
+         np.ndarray[DTYPE_t, ndim=2] boxes,
+         np.ndarray[DTYPE_t, ndim=2] query_boxes):
+     """
+     Parameters
+     ----------
+     boxes: (N, 4) ndarray of float
+     query_boxes: (K, 4) ndarray of float
+     Returns
+     -------
+     overlaps: (N, K) ndarray of overlap between boxes and query_boxes
+     """
+     cdef unsigned int N = boxes.shape[0]
+     cdef unsigned int K = query_boxes.shape[0]
+     cdef np.ndarray[DTYPE_t, ndim=2] overlaps = np.zeros((N, K), dtype=DTYPE)
+     cdef DTYPE_t iw, ih, box_area
+     cdef DTYPE_t ua
+     cdef unsigned int k, n
+     for k in range(K):
+         box_area = (
+             (query_boxes[k, 2] - query_boxes[k, 0] + 1) *
+             (query_boxes[k, 3] - query_boxes[k, 1] + 1)
+         )
+         for n in range(N):
+             iw = (
+                 min(boxes[n, 2], query_boxes[k, 2]) -
+                 max(boxes[n, 0], query_boxes[k, 0]) + 1
+             )
+             if iw > 0:
+                 ih = (
+                     min(boxes[n, 3], query_boxes[k, 3]) -
+                     max(boxes[n, 1], query_boxes[k, 1]) + 1
+                 )
+                 if ih > 0:
+                     ua = float(
+                         (boxes[n, 2] - boxes[n, 0] + 1) *
+                         (boxes[n, 3] - boxes[n, 1] + 1) +
+                         box_area - iw * ih
+                     )
+                     overlaps[n, k] = iw * ih / ua
+     return overlaps
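
Once compiled (`python3 setup.py build_ext --inplace`, per widerface_evaluate/README.md), the extension imports as `bbox`, exactly as evaluation.py does below. A small sketch; note the +1 pixel convention in the area math:

```python
import numpy as np
from bbox import bbox_overlaps  # name of the compiled extension module

a = np.array([[0., 0., 10., 10.]])                        # one 11x11 box under the +1 convention
b = np.array([[0., 0., 10., 10.], [20., 20., 30., 30.]])  # an identical box and a disjoint one
print(bbox_overlaps(a, b))  # [[1. 0.]]
```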
widerface_evaluate/evaluation.py ADDED
@@ -0,0 +1,303 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ WiderFace evaluation code
3
+ author: wondervictor
4
+ mail: tianhengcheng@gmail.com
5
+ copyright@wondervictor
6
+ """
7
+
8
+ import os
9
+ import tqdm
10
+ import pickle
11
+ import argparse
12
+ import numpy as np
13
+ from scipy.io import loadmat
14
+ from bbox import bbox_overlaps
15
+ from IPython import embed
+
+
+ def get_gt_boxes(gt_dir):
+     """gt_dir must contain wider_face_val.mat, wider_easy_val.mat, wider_medium_val.mat and wider_hard_val.mat"""
+
+     gt_mat = loadmat(os.path.join(gt_dir, 'wider_face_val.mat'))
+     hard_mat = loadmat(os.path.join(gt_dir, 'wider_hard_val.mat'))
+     medium_mat = loadmat(os.path.join(gt_dir, 'wider_medium_val.mat'))
+     easy_mat = loadmat(os.path.join(gt_dir, 'wider_easy_val.mat'))
+
+     facebox_list = gt_mat['face_bbx_list']
+     event_list = gt_mat['event_list']
+     file_list = gt_mat['file_list']
+
+     hard_gt_list = hard_mat['gt_list']
+     medium_gt_list = medium_mat['gt_list']
+     easy_gt_list = easy_mat['gt_list']
+
+     return facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list
+
+
+ def get_gt_boxes_from_txt(gt_path, cache_dir):
+
+     cache_file = os.path.join(cache_dir, 'gt_cache.pkl')
+     if os.path.exists(cache_file):
+         f = open(cache_file, 'rb')
+         boxes = pickle.load(f)
+         f.close()
+         return boxes
+
+     f = open(gt_path, 'r')
+     state = 0
+     lines = f.readlines()
+     lines = list(map(lambda x: x.rstrip('\r\n'), lines))
+     boxes = {}
+     f.close()
+     current_boxes = []
+     current_name = None
+     for line in lines:
+         if state == 0 and '--' in line:
+             state = 1
+             current_name = line
+             continue
+         if state == 1:
+             # the line after an image name holds the face count; skip it
+             state = 2
+             continue
+
+         if state == 2 and '--' in line:
+             state = 1
+             boxes[current_name] = np.array(current_boxes).astype('float32')
+             current_name = line
+             current_boxes = []
+             continue
+
+         if state == 2:
+             box = [float(x) for x in line.split(' ')[:4]]
+             current_boxes.append(box)
+             continue
+
+     # flush the boxes of the last image, which the loop above never stores
+     if current_name is not None:
+         boxes[current_name] = np.array(current_boxes).astype('float32')
+
+     f = open(cache_file, 'wb')
+     pickle.dump(boxes, f)
+     f.close()
+     return boxes
+
+
+ def read_pred_file(filepath):
+
+     with open(filepath, 'r') as f:
+         lines = f.readlines()
+         img_file = lines[0].rstrip('\n\r')
+         # lines[1] holds the detection count; the boxes start at lines[2]
+         lines = lines[2:]
+
+     boxes = []
+     for line in lines:
+         line = line.rstrip('\r\n').split(' ')
+         if line[0] == '':
+             continue
+         boxes.append([float(line[0]), float(line[1]), float(line[2]), float(line[3]), float(line[4])])
+     boxes = np.array(boxes)
+     return img_file.split('/')[-1], boxes
+
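The parser above implies the usual WIDER FACE submission layout: the first line names the image, the second line gives the number of detections, and each following line is one `x y w h score` detection. A hypothetical example file (made-up paths and values, purely for illustration):

```
0--Parade/0_Parade_marchingband_1_465.jpg
2
345.2 211.0 48.5 62.1 0.99
510.7 190.3 30.2 41.8 0.87
```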
+
+ def get_preds(pred_dir):
+     events = os.listdir(pred_dir)
+     boxes = dict()
+     pbar = tqdm.tqdm(events)
+
+     for event in pbar:
+         pbar.set_description('Reading Predictions ')
+         event_dir = os.path.join(pred_dir, event)
+         event_images = os.listdir(event_dir)
+         current_event = dict()
+         for imgtxt in event_images:
+             imgname, _boxes = read_pred_file(os.path.join(event_dir, imgtxt))
+             # strip the extension; rstrip('.jpg') would also eat trailing
+             # 'j'/'p'/'g' characters from the file stem
+             current_event[os.path.splitext(imgname)[0]] = _boxes
+         boxes[event] = current_event
+     return boxes
+
+
+ def norm_score(pred):
+     """ norm score
+     pred {key: [[x1,y1,x2,y2,s]]}
+     """
+
+     # the initializers assume raw scores lie in [0, 1]
+     max_score = 0
+     min_score = 1
+
+     for _, k in pred.items():
+         for _, v in k.items():
+             if len(v) == 0:
+                 continue
+             _min = np.min(v[:, -1])
+             _max = np.max(v[:, -1])
+             max_score = max(_max, max_score)
+             min_score = min(_min, min_score)
+
+     diff = max_score - min_score
+     for _, k in pred.items():
+         for _, v in k.items():
+             if len(v) == 0:
+                 continue
+             v[:, -1] = (v[:, -1] - min_score)/diff
+
+
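`norm_score` min-max rescales every detection score across the whole prediction set so that the fixed threshold sweep in `img_pr_info` is comparable between detectors. A toy check with hypothetical scores (in place, since the arrays are mutated directly):

```python
import numpy as np

# One event, one image, two boxes with scores 0.2 and 0.6.
pred = {'event': {'img': np.array([[0., 0., 10., 10., 0.2],
                                   [5., 5., 10., 10., 0.6]])}}
norm_score(pred)
print(pred['event']['img'][:, -1])  # -> [0. 1.]
```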
146
+ def image_eval(pred, gt, ignore, iou_thresh):
147
+ """ single image evaluation
148
+ pred: Nx5
149
+ gt: Nx4
150
+ ignore:
151
+ """
152
+
153
+ _pred = pred.copy()
154
+ _gt = gt.copy()
155
+ pred_recall = np.zeros(_pred.shape[0])
156
+ recall_list = np.zeros(_gt.shape[0])
157
+ proposal_list = np.ones(_pred.shape[0])
158
+
159
+ _pred[:, 2] = _pred[:, 2] + _pred[:, 0]
160
+ _pred[:, 3] = _pred[:, 3] + _pred[:, 1]
161
+ _gt[:, 2] = _gt[:, 2] + _gt[:, 0]
162
+ _gt[:, 3] = _gt[:, 3] + _gt[:, 1]
163
+
164
+ overlaps = bbox_overlaps(_pred[:, :4], _gt)
165
+
166
+ for h in range(_pred.shape[0]):
167
+
168
+ gt_overlap = overlaps[h]
169
+ max_overlap, max_idx = gt_overlap.max(), gt_overlap.argmax()
170
+ if max_overlap >= iou_thresh:
171
+ if ignore[max_idx] == 0:
172
+ recall_list[max_idx] = -1
173
+ proposal_list[h] = -1
174
+ elif recall_list[max_idx] == 0:
175
+ recall_list[max_idx] = 1
176
+
177
+ r_keep_index = np.where(recall_list == 1)[0]
178
+ pred_recall[h] = len(r_keep_index)
179
+ return pred_recall, proposal_list
180
+
181
+
+ def img_pr_info(thresh_num, pred_info, proposal_list, pred_recall):
+     pr_info = np.zeros((thresh_num, 2)).astype('float')
+     for t in range(thresh_num):
+
+         # sweep score thresholds from high to low
+         thresh = 1 - (t+1)/thresh_num
+         r_index = np.where(pred_info[:, 4] >= thresh)[0]
+         if len(r_index) == 0:
+             pr_info[t, 0] = 0
+             pr_info[t, 1] = 0
+         else:
+             r_index = r_index[-1]
+             p_index = np.where(proposal_list[:r_index+1] == 1)[0]
+             pr_info[t, 0] = len(p_index)
+             pr_info[t, 1] = pred_recall[r_index]
+     return pr_info
+
+
+ def dataset_pr_info(thresh_num, pr_curve, count_face):
+     _pr_curve = np.zeros((thresh_num, 2))
+     for i in range(thresh_num):
+         # precision = matched faces / retained proposals
+         _pr_curve[i, 0] = pr_curve[i, 1] / pr_curve[i, 0]
+         # recall = matched faces / total faces
+         _pr_curve[i, 1] = pr_curve[i, 1] / count_face
+     return _pr_curve
+
+
+ def voc_ap(rec, prec):
+
+     # correct AP calculation
+     # first append sentinel values at the end
+     mrec = np.concatenate(([0.], rec, [1.]))
+     mpre = np.concatenate(([0.], prec, [0.]))
+
+     # compute the precision envelope
+     for i in range(mpre.size - 1, 0, -1):
+         mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
+
+     # to calculate area under PR curve, look for points
+     # where X axis (recall) changes value
+     i = np.where(mrec[1:] != mrec[:-1])[0]
+
+     # and sum (\Delta recall) * prec
+     ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
+     return ap
+
+
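For intuition, a tiny worked example of `voc_ap` (hypothetical PR points, not from the dataset, with `voc_ap` from above in scope): with recall `[0.5, 1.0]` and precision `[1.0, 0.5]`, the precision envelope integrates to 0.5 * 1.0 + 0.5 * 0.5 = 0.75:

```python
import numpy as np

rec = np.array([0.5, 1.0])   # hypothetical recall points
prec = np.array([1.0, 0.5])  # hypothetical precision points
print(voc_ap(rec, prec))     # -> 0.75
```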
+ def evaluation(pred, gt_path, iou_thresh=0.5):
+     pred = get_preds(pred)
+     norm_score(pred)
+     facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list = get_gt_boxes(gt_path)
+     event_num = len(event_list)
+     thresh_num = 1000
+     settings = ['easy', 'medium', 'hard']
+     setting_gts = [easy_gt_list, medium_gt_list, hard_gt_list]
+     aps = []
+     for setting_id in range(3):
+         # one pass per difficulty setting: easy, medium, hard
+         gt_list = setting_gts[setting_id]
+         count_face = 0
+         pr_curve = np.zeros((thresh_num, 2)).astype('float')
+         pbar = tqdm.tqdm(range(event_num))
+         for i in pbar:
+             pbar.set_description('Processing {}'.format(settings[setting_id]))
+             event_name = str(event_list[i][0][0])
+             img_list = file_list[i][0]
+             pred_list = pred[event_name]
+             sub_gt_list = gt_list[i][0]
+             gt_bbx_list = facebox_list[i][0]
+
+             for j in range(len(img_list)):
+                 pred_info = pred_list[str(img_list[j][0][0])]
+
+                 gt_boxes = gt_bbx_list[j][0].astype('float')
+                 keep_index = sub_gt_list[j][0]
+                 count_face += len(keep_index)
+
+                 if len(gt_boxes) == 0 or len(pred_info) == 0:
+                     continue
+                 ignore = np.zeros(gt_boxes.shape[0])
+                 if len(keep_index) != 0:
+                     # keep_index is 1-based (it comes from MATLAB .mat files)
+                     ignore[keep_index-1] = 1
+                 pred_recall, proposal_list = image_eval(pred_info, gt_boxes, ignore, iou_thresh)
+
+                 _img_pr_info = img_pr_info(thresh_num, pred_info, proposal_list, pred_recall)
+
+                 pr_curve += _img_pr_info
+         pr_curve = dataset_pr_info(thresh_num, pr_curve, count_face)
+
+         propose = pr_curve[:, 0]
+         recall = pr_curve[:, 1]
+
+         ap = voc_ap(recall, propose)
+         aps.append(ap)
+
+     print("==================== Results ====================")
+     print("Easy Val AP: {}".format(aps[0]))
+     print("Medium Val AP: {}".format(aps[1]))
+     print("Hard Val AP: {}".format(aps[2]))
+     print("=================================================")
+
+
+ if __name__ == '__main__':
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument('-p', '--pred', default="./widerface_txt/")
+     parser.add_argument('-g', '--gt', default='./ground_truth/')
+
+     args = parser.parse_args()
+     evaluation(args.pred, args.gt)
widerface_evaluate/ground_truth/wider_easy_val.mat ADDED
Binary file (409 kB). View file
 
widerface_evaluate/ground_truth/wider_face_val.mat ADDED
Binary file (398 kB). View file
 
widerface_evaluate/ground_truth/wider_hard_val.mat ADDED
Binary file (424 kB). View file
 
widerface_evaluate/ground_truth/wider_medium_val.mat ADDED
Binary file (413 kB). View file
 
widerface_evaluate/setup.py ADDED
@@ -0,0 +1,13 @@
+ """
+ WiderFace evaluation code
+ author: wondervictor
+ mail: tianhengcheng@gmail.com
+ copyright@wondervictor
+ """
+
+ # distutils was removed in Python 3.12; setuptools provides the same API
+ from setuptools import setup, Extension
+ from Cython.Build import cythonize
+ import numpy
+
+ package = Extension('bbox', ['box_overlaps.pyx'], include_dirs=[numpy.get_include()])
+ setup(ext_modules=cythonize([package]))