datnguyentien204 committed
Commit e3c6fb8 • 1 Parent(s): 1cf023e
Browse files
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_492.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_518.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_520.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_521.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_552.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_563.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_571.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_594.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_595.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_60.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_607.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_613.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_615.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_632.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_636.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_648.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_655.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_658.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_66.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_693.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_710.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_74.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_748.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_757.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_767.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_784.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_828.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_849.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_872.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_883.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_89.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_907.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_924.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_930.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_933.jpg +3 -0
- dataset/widerface/widerface/train/images/9--Press_Conference/9_Press_Conference_Press_Conference_9_945.jpg +3 -0
- dataset/widerface/widerface/train/timer.py +40 -0
- dataset/widerface/widerface/train/wider_val.txt +0 -0
- detect.py +170 -0
- layers/__init__.py +2 -0
- layers/__pycache__/__init__.cpython-38.pyc +0 -0
- layers/functions/__pycache__/prior_box.cpython-38.pyc +0 -0
- layers/functions/prior_box.py +34 -0
- layers/modules/__init__.py +3 -0
- layers/modules/__pycache__/__init__.cpython-38.pyc +0 -0
- layers/modules/__pycache__/multibox_loss.cpython-38.pyc +0 -0
- layers/modules/multibox_loss.py +125 -0
- models/__init__.py +0 -0
- models/__pycache__/__init__.cpython-38.pyc +0 -0
- models/__pycache__/net.cpython-38.pyc +0 -0
dataset/widerface/widerface/train/images/9--Press_Conference/ (the 36 .jpg files listed above)
ADDED
Git LFS Details: each image is stored as a Git LFS pointer; the collapsed per-file LFS details are omitted here.
dataset/widerface/widerface/train/timer.py
ADDED
@@ -0,0 +1,40 @@
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------

import time


class Timer(object):
    """A simple timer."""
    def __init__(self):
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.

    def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
        self.start_time = time.time()

    def toc(self, average=True):
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        if average:
            return self.average_time
        else:
            return self.diff

    def clear(self):
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.
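As a quick illustration (not part of the commit), the Timer above can wrap any repeated operation; the workload below is a stand-in for something like a forward pass:

from timer import Timer  # assumes timer.py is importable from the working directory

t = Timer()
for _ in range(5):
    t.tic()
    sum(range(1000000))        # stand-in workload
    avg = t.toc(average=True)  # returns the running average over all calls so far
print('average: {:.4f}s over {} calls'.format(avg, t.calls))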
dataset/widerface/widerface/train/wider_val.txt
ADDED
The diff for this file is too large to render.
detect.py
ADDED
@@ -0,0 +1,170 @@
from __future__ import print_function
import os
import argparse
import torch
import torch.backends.cudnn as cudnn
import numpy as np
from data import cfg_mnet, cfg_re50
from layers.functions.prior_box import PriorBox
from utils.nms.py_cpu_nms import py_cpu_nms
import cv2
from models.retinaface import RetinaFace
from utils.box_utils import decode, decode_landm
import time

parser = argparse.ArgumentParser(description='Retinaface')

parser.add_argument('-m', '--trained_model', default='Retinaface_model_v2/mobilenet0.25_Final.pth',
                    type=str, help='Trained state_dict file path to open')
parser.add_argument('--network', default='mobile0.25', help='Backbone network mobile0.25 or resnet50')
parser.add_argument('--cpu', action="store_true", default=True, help='Use cpu inference')
parser.add_argument('--confidence_threshold', default=0.02, type=float, help='confidence_threshold')
parser.add_argument('--top_k', default=5000, type=int, help='top_k')
parser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')
parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
parser.add_argument('-s', '--save_image', action="store_true", default=True, help='show detection results')
parser.add_argument('--vis_thres', default=0.6, type=float, help='visualization_threshold')
args = parser.parse_args()


def check_keys(model, pretrained_state_dict):
    ckpt_keys = set(pretrained_state_dict.keys())
    model_keys = set(model.state_dict().keys())
    used_pretrained_keys = model_keys & ckpt_keys
    unused_pretrained_keys = ckpt_keys - model_keys
    missing_keys = model_keys - ckpt_keys
    print('Missing keys:{}'.format(len(missing_keys)))
    print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
    print('Used keys:{}'.format(len(used_pretrained_keys)))
    assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
    return True


def remove_prefix(state_dict, prefix):
    ''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
    print('remove prefix \'{}\''.format(prefix))
    f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
    return {f(key): value for key, value in state_dict.items()}


def load_model(model, pretrained_path, load_to_cpu):
    print('Loading pretrained model from {}'.format(pretrained_path))
    if load_to_cpu:
        pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
    else:
        device = torch.cuda.current_device()
        pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
    if "state_dict" in pretrained_dict.keys():
        pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
    else:
        pretrained_dict = remove_prefix(pretrained_dict, 'module.')
    check_keys(model, pretrained_dict)
    model.load_state_dict(pretrained_dict, strict=False)
    return model


if __name__ == '__main__':
    torch.set_grad_enabled(False)
    cfg = None
    if args.network == "mobile0.25":
        cfg = cfg_mnet
    elif args.network == "resnet50":
        cfg = cfg_re50

    net = RetinaFace(cfg=cfg, phase='test')
    net = load_model(net, args.trained_model, args.cpu)
    net.eval()
    print('Finished loading model!')
    cudnn.benchmark = True
    device = torch.device("cpu" if args.cpu else "cuda")
    net = net.to(device)

    resize = 1

    cap = cv2.VideoCapture(0)

    if not cap.isOpened():
        print("Cannot open the camera")
        exit()

    while True:
        ret, frame = cap.read()  # read a frame from the camera
        if not ret:
            print("Cannot read a frame. Exiting...")
            break

        img_raw = frame.copy()

        img = np.float32(frame)
        im_height, im_width, _ = img.shape
        scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
        img -= (104, 117, 123)
        img = img.transpose(2, 0, 1)
        img = torch.from_numpy(img).unsqueeze(0)
        img = img.to(device)
        scale = scale.to(device)

        tic = time.time()
        loc, conf, landms = net(img)  # forward pass
        print('net forward time: {:.4f}'.format(time.time() - tic))

        priorbox = PriorBox(cfg, image_size=(im_height, im_width))
        priors = priorbox.forward()
        priors = priors.to(device)
        prior_data = priors.data
        boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
        boxes = boxes * scale / resize
        boxes = boxes.cpu().numpy()
        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
        landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])
        scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
                               img.shape[3], img.shape[2], img.shape[3], img.shape[2],
                               img.shape[3], img.shape[2]])
        scale1 = scale1.to(device)
        landms = landms * scale1 / resize
        landms = landms.cpu().numpy()

        inds = np.where(scores > args.confidence_threshold)[0]
        boxes = boxes[inds]
        landms = landms[inds]
        scores = scores[inds]

        order = scores.argsort()[::-1][:args.top_k]
        boxes = boxes[order]
        landms = landms[order]
        scores = scores[order]

        dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
        keep = py_cpu_nms(dets, args.nms_threshold)
        dets = dets[keep, :]
        landms = landms[keep]

        dets = dets[:args.keep_top_k, :]
        landms = landms[:args.keep_top_k, :]

        dets = np.concatenate((dets, landms), axis=1)

        for b in dets:
            if b[4] < args.vis_thres:
                continue
            text = "{:.4f}".format(b[4])
            b = list(map(int, b))
            cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
            cx = b[0]
            cy = b[1] + 12
            cv2.putText(img_raw, text, (cx, cy),
                        cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))

            # draw the five facial landmarks
            cv2.circle(img_raw, (b[5], b[6]), 1, (0, 0, 255), 4)
            cv2.circle(img_raw, (b[7], b[8]), 1, (0, 255, 255), 4)
            cv2.circle(img_raw, (b[9], b[10]), 1, (255, 0, 255), 4)
            cv2.circle(img_raw, (b[11], b[12]), 1, (0, 255, 0), 4)
            cv2.circle(img_raw, (b[13], b[14]), 1, (255, 0, 0), 4)

        cv2.imshow('RetinaFace Detection', img_raw)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
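Since every knob in this script is an argparse flag, a plausible invocation (the checkpoint path is whatever you have locally) is:

python detect.py --network mobile0.25 -m Retinaface_model_v2/mobilenet0.25_Final.pth --vis_thres 0.6

Press 'q' in the preview window to stop the capture loop.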
layers/__init__.py
ADDED
@@ -0,0 +1,2 @@
from .functions import *
from .modules import *
layers/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (194 Bytes).
layers/functions/__pycache__/prior_box.cpython-38.pyc
ADDED
Binary file (1.85 kB).
layers/functions/prior_box.py
ADDED
@@ -0,0 +1,34 @@
import torch
from itertools import product as product
import numpy as np
from math import ceil


class PriorBox(object):
    def __init__(self, cfg, image_size=None, phase='train'):
        super(PriorBox, self).__init__()
        self.min_sizes = cfg['min_sizes']
        self.steps = cfg['steps']
        self.clip = cfg['clip']
        self.image_size = image_size
        self.feature_maps = [[ceil(self.image_size[0]/step), ceil(self.image_size[1]/step)] for step in self.steps]
        self.name = "s"

    def forward(self):
        anchors = []
        for k, f in enumerate(self.feature_maps):
            min_sizes = self.min_sizes[k]
            for i, j in product(range(f[0]), range(f[1])):
                for min_size in min_sizes:
                    s_kx = min_size / self.image_size[1]
                    s_ky = min_size / self.image_size[0]
                    dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]
                    dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]
                    for cy, cx in product(dense_cy, dense_cx):
                        anchors += [cx, cy, s_kx, s_ky]

        # back to torch land
        output = torch.Tensor(anchors).view(-1, 4)
        if self.clip:
            output.clamp_(max=1, min=0)
        return output
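A minimal sketch of driving PriorBox by hand; the cfg keys are exactly the ones the constructor reads ('min_sizes', 'steps', 'clip'), but the values shown are illustrative stand-ins in the style of the mobile0.25 config, not taken from this commit:

cfg = {
    'min_sizes': [[16, 32], [64, 128], [256, 512]],  # anchor sizes per feature map (assumed values)
    'steps': [8, 16, 32],                            # feature-map strides (assumed values)
    'clip': False,
}
priorbox = PriorBox(cfg, image_size=(480, 640))
priors = priorbox.forward()  # tensor of shape (num_priors, 4): normalized (cx, cy, w, h)
print(priors.shape)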
layers/modules/__init__.py
ADDED
@@ -0,0 +1,3 @@
from .multibox_loss import MultiBoxLoss

__all__ = ['MultiBoxLoss']
layers/modules/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (229 Bytes).
layers/modules/__pycache__/multibox_loss.cpython-38.pyc
ADDED
Binary file (4.25 kB).
layers/modules/multibox_loss.py
ADDED
@@ -0,0 +1,125 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils.box_utils import match, log_sum_exp
from data import cfg_mnet
GPU = cfg_mnet['gpu_train']

class MultiBoxLoss(nn.Module):
    """SSD Weighted Loss Function
    Compute Targets:
        1) Produce Confidence Target Indices by matching ground truth boxes
           with (default) 'priorboxes' that have jaccard index > threshold parameter
           (default threshold: 0.5).
        2) Produce localization target by 'encoding' variance into offsets of ground
           truth boxes and their matched 'priorboxes'.
        3) Hard negative mining to filter the excessive number of negative examples
           that comes with using a large number of default bounding boxes.
           (default negative:positive ratio 3:1)
    Objective Loss:
        L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
        Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
        weighted by α which is set to 1 by cross val.
        Args:
            c: class confidences,
            l: predicted boxes,
            g: ground truth boxes
            N: number of matched default boxes
        See: https://arxiv.org/pdf/1512.02325.pdf for more details.
    """

    def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining, neg_pos, neg_overlap, encode_target):
        super(MultiBoxLoss, self).__init__()
        self.num_classes = num_classes
        self.threshold = overlap_thresh
        self.background_label = bkg_label
        self.encode_target = encode_target
        self.use_prior_for_matching = prior_for_matching
        self.do_neg_mining = neg_mining
        self.negpos_ratio = neg_pos
        self.neg_overlap = neg_overlap
        self.variance = [0.1, 0.2]

    def forward(self, predictions, priors, targets):
        """Multibox Loss
        Args:
            predictions (tuple): A tuple containing loc preds, conf preds,
                and prior boxes from SSD net.
                conf shape: torch.size(batch_size,num_priors,num_classes)
                loc shape: torch.size(batch_size,num_priors,4)
                priors shape: torch.size(num_priors,4)

            ground_truth (tensor): Ground truth boxes and labels for a batch,
                shape: [batch_size,num_objs,5] (last idx is the label).
        """

        loc_data, conf_data, landm_data = predictions
        priors = priors
        num = loc_data.size(0)
        num_priors = (priors.size(0))

        # match priors (default boxes) and ground truth boxes
        loc_t = torch.Tensor(num, num_priors, 4)
        landm_t = torch.Tensor(num, num_priors, 10)
        conf_t = torch.LongTensor(num, num_priors)
        for idx in range(num):
            truths = targets[idx][:, :4].data
            labels = targets[idx][:, -1].data
            landms = targets[idx][:, 4:14].data
            defaults = priors.data
            match(self.threshold, truths, defaults, self.variance, labels, landms, loc_t, conf_t, landm_t, idx)
        if GPU:
            loc_t = loc_t.cuda()
            conf_t = conf_t.cuda()
            landm_t = landm_t.cuda()

        # note: upstream code calls .cuda() unconditionally here; guarded so CPU-only runs do not crash
        zeros = torch.tensor(0).cuda() if GPU else torch.tensor(0)
        # landm Loss (Smooth L1)
        # Shape: [batch,num_priors,10]
        pos1 = conf_t > zeros
        num_pos_landm = pos1.long().sum(1, keepdim=True)
        N1 = max(num_pos_landm.data.sum().float(), 1)
        pos_idx1 = pos1.unsqueeze(pos1.dim()).expand_as(landm_data)
        landm_p = landm_data[pos_idx1].view(-1, 10)
        landm_t = landm_t[pos_idx1].view(-1, 10)
        loss_landm = F.smooth_l1_loss(landm_p, landm_t, reduction='sum')

        pos = conf_t != zeros
        conf_t[pos] = 1

        # Localization Loss (Smooth L1)
        # Shape: [batch,num_priors,4]
        pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)
        loc_p = loc_data[pos_idx].view(-1, 4)
        loc_t = loc_t[pos_idx].view(-1, 4)
        loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')

        # Compute max conf across batch for hard negative mining
        batch_conf = conf_data.view(-1, self.num_classes)
        loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))

        # Hard Negative Mining
        loss_c[pos.view(-1, 1)] = 0  # filter out pos boxes for now
        loss_c = loss_c.view(num, -1)
        _, loss_idx = loss_c.sort(1, descending=True)
        _, idx_rank = loss_idx.sort(1)
        num_pos = pos.long().sum(1, keepdim=True)
        num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)
        neg = idx_rank < num_neg.expand_as(idx_rank)

        # Confidence Loss Including Positive and Negative Examples
        pos_idx = pos.unsqueeze(2).expand_as(conf_data)
        neg_idx = neg.unsqueeze(2).expand_as(conf_data)
        conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)
        targets_weighted = conf_t[(pos+neg).gt(0)]
        loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')

        # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
        N = max(num_pos.data.sum().float(), 1)
        loss_l /= N
        loss_c /= N
        loss_landm /= N1

        return loss_l, loss_c, loss_landm
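For context, a hedged sketch of how MultiBoxLoss is typically constructed and invoked in a training step; the argument values and the loc_weight weighting mirror common RetinaFace training code and are assumptions here, not part of this commit:

criterion = MultiBoxLoss(num_classes=2, overlap_thresh=0.35, prior_for_matching=True,
                         bkg_label=0, neg_mining=True, neg_pos=7,
                         neg_overlap=0.35, encode_target=False)

# inside the training loop (net, images, priors, targets are assumed to exist):
# loc, conf, landms = net(images)
# loss_l, loss_c, loss_landm = criterion((loc, conf, landms), priors, targets)
# loss = cfg['loc_weight'] * loss_l + loss_c + loss_landm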
models/__init__.py
ADDED
File is empty (added with no content).
models/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (149 Bytes).
models/__pycache__/net.cpython-38.pyc
ADDED
Binary file (4.14 kB).