gavinyuan committed
Commit b9be4e6
1 Parent(s): ee36df0

add: PIPNet, arcface


update: gpu or cpu mode

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. app.py +151 -150
  2. inference/alignment.py +245 -0
  3. inference/landmark_smooth.py +117 -0
  4. inference/tricks.py +8 -8
  5. inference/utils.py +133 -0
  6. third_party/GPEN/infer_image.py +4 -2
  7. third_party/PIPNet/FaceBoxesV2/detector.py +39 -0
  8. third_party/PIPNet/FaceBoxesV2/faceboxes_detector.py +124 -0
  9. third_party/PIPNet/FaceBoxesV2/utils/__init__.py +0 -0
  10. third_party/PIPNet/FaceBoxesV2/utils/box_utils.py +276 -0
  11. third_party/PIPNet/FaceBoxesV2/utils/build.py +57 -0
  12. third_party/PIPNet/FaceBoxesV2/utils/config.py +14 -0
  13. third_party/PIPNet/FaceBoxesV2/utils/faceboxes.py +239 -0
  14. third_party/PIPNet/FaceBoxesV2/utils/make.sh +3 -0
  15. third_party/PIPNet/FaceBoxesV2/utils/nms/__init__.py +0 -0
  16. third_party/PIPNet/FaceBoxesV2/utils/nms/cpu_nms.c +0 -0
  17. third_party/PIPNet/FaceBoxesV2/utils/nms/cpu_nms.cpython-36m-x86_64-linux-gnu.so +0 -0
  18. third_party/PIPNet/FaceBoxesV2/utils/nms/cpu_nms.cpython-38-x86_64-linux-gnu.so +0 -0
  19. third_party/PIPNet/FaceBoxesV2/utils/nms/cpu_nms.pyx +163 -0
  20. third_party/PIPNet/FaceBoxesV2/utils/nms/gpu_nms.hpp +2 -0
  21. third_party/PIPNet/FaceBoxesV2/utils/nms/gpu_nms.pyx +31 -0
  22. third_party/PIPNet/FaceBoxesV2/utils/nms/nms_kernel.cu +144 -0
  23. third_party/PIPNet/FaceBoxesV2/utils/nms/py_cpu_nms.py +38 -0
  24. third_party/PIPNet/FaceBoxesV2/utils/nms_wrapper.py +15 -0
  25. third_party/PIPNet/FaceBoxesV2/utils/prior_box.py +43 -0
  26. third_party/PIPNet/FaceBoxesV2/utils/timer.py +40 -0
  27. third_party/PIPNet/LICENSE +21 -0
  28. third_party/PIPNet/README.md +153 -0
  29. third_party/PIPNet/lib/data_utils.py +166 -0
  30. third_party/PIPNet/lib/data_utils_gssl.py +290 -0
  31. third_party/PIPNet/lib/demo.py +159 -0
  32. third_party/PIPNet/lib/demo_video.py +141 -0
  33. third_party/PIPNet/lib/functions.py +210 -0
  34. third_party/PIPNet/lib/functions_gssl.py +241 -0
  35. third_party/PIPNet/lib/mobilenetv3.py +233 -0
  36. third_party/PIPNet/lib/networks.py +415 -0
  37. third_party/PIPNet/lib/networks_gssl.py +80 -0
  38. third_party/PIPNet/lib/preprocess.py +554 -0
  39. third_party/PIPNet/lib/preprocess_gssl.py +544 -0
  40. third_party/PIPNet/lib/tools.py +174 -0
  41. third_party/PIPNet/lib/train.py +196 -0
  42. third_party/PIPNet/lib/train_gssl.py +303 -0
  43. third_party/PIPNet/requirements.txt +3 -0
  44. third_party/PIPNet/reverse_index.py +3338 -0
  45. third_party/PIPNet/run_demo.sh +11 -0
  46. third_party/PIPNet/run_test.sh +34 -0
  47. third_party/PIPNet/run_train.sh +33 -0
  48. weights/PIPNet/FaceBoxesV2.pth +3 -0
  49. weights/PIPNet/epoch59.pth +3 -0
  50. weights/arcface/mouth_net_28_56_84_112.pth +3 -0
app.py CHANGED
@@ -15,156 +15,156 @@ from PIL import Image
 import tqdm
 
 from modules.networks.faceshifter import FSGenerator
-# from inference.alignment import norm_crop, norm_crop_with_M, paste_back
-# from inference.utils import save, get_5_from_98, get_detector, get_lmk
-# from inference.PIPNet.lib.tools import get_lmk_model, demo_image
-# from inference.landmark_smooth import kalman_filter_landmark, savgol_filter_landmark
+from inference.alignment import norm_crop, norm_crop_with_M, paste_back
+from inference.utils import save, get_5_from_98, get_detector, get_lmk
+from third_party.PIPNet.lib.tools import get_lmk_model, demo_image
+from inference.landmark_smooth import kalman_filter_landmark, savgol_filter_landmark
 from inference.tricks import Trick
 
-# make_abs_path = lambda fn: os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), fn))
-#
-#
-# fs_model_name = 'faceshifter'
-# in_size = 512
-#
-# mouth_net_param = {
-#     "use": True,
-#     "feature_dim": 128,
-#     "crop_param": (28, 56, 84, 112),
-#     "weight_path": "../../modules/third_party/arcface/weights/mouth_net_28_56_84_112.pth",
-# }
-# trick = Trick()
-#
-# T = transforms.Compose(
-#     [
-#         transforms.ToTensor(),
-#         transforms.Normalize(0.5, 0.5),
-#     ]
-# )
-# tensor2pil_transform = transforms.ToPILImage()
-#
-#
-# def extract_generator(ckpt: str, pt: str):
-#     print(f'[extract_generator] loading ckpt...')
-#     from trainer.faceshifter.faceshifter_pl import FaceshifterPL512, FaceshifterPL
-#     import yaml
-#     with open(make_abs_path('../../trainer/faceshifter/config.yaml'), 'r') as f:
-#         config = yaml.load(f, Loader=yaml.FullLoader)
-#     config['mouth_net'] = mouth_net_param
-#
-#     if in_size == 256:
-#         net = FaceshifterPL(n_layers=3, num_D=3, config=config)
-#     elif in_size == 512:
-#         net = FaceshifterPL512(n_layers=3, num_D=3, config=config, verbose=False)
-#     else:
-#         raise ValueError('Not supported in_size.')
-#     checkpoint = torch.load(ckpt, map_location="cpu", )
-#     net.load_state_dict(checkpoint["state_dict"], strict=False)
-#     net.eval()
-#
-#     G = net.generator
-#     torch.save(G.state_dict(), pt)
-#     print(f'[extract_generator] extracted from {ckpt}, pth saved to {pt}')
-#
-#
-# ''' load model '''
-# if fs_model_name == 'faceshifter':
-#     # pt_path = make_abs_path("../ffplus/extracted_ckpt/G_mouth1_t38.pth")
-#     # pt_path = make_abs_path("../ffplus/extracted_ckpt/G_mouth1_t512_6.pth")
-#     # ckpt_path = "/apdcephfs/share_1290939/gavinyuan/out/triplet512_6/epoch=3-step=128999.ckpt"
-#     pt_path = make_abs_path("../ffplus/extracted_ckpt/G_mouth1_t512_4.pth")
-#     ckpt_path = "/apdcephfs/share_1290939/gavinyuan/out/triplet512_4/epoch=2-step=185999.ckpt"
-#     if not os.path.exists(pt_path) or 't512' in pt_path:
-#         extract_generator(ckpt_path, pt_path)
-#     fs_model = FSGenerator(
-#         make_abs_path("../../modules/third_party/arcface/weights/ms1mv3_arcface_r100_fp16/backbone.pth"),
-#         mouth_net_param=mouth_net_param,
-#         in_size=in_size,
-#         downup=in_size == 512,
-#     )
-#     fs_model.load_state_dict(torch.load(pt_path, "cpu"), strict=True)
-#     fs_model.eval()
-#
-#     @torch.no_grad()
-#     def infer_batch_to_img(i_s, i_t, post: bool = False):
-#         i_r = fs_model(i_s, i_t)[0]  # x, id_vector, att
-#
-#         if post:
-#             target_hair_mask = trick.get_any_mask(i_t, par=[0, 17])
-#             target_hair_mask = trick.smooth_mask(target_hair_mask)
-#             i_r = target_hair_mask * i_t + (target_hair_mask * (-1) + 1) * i_r
-#             i_r = trick.finetune_mouth(i_s, i_t, i_r) if in_size == 256 else i_r
-#
-#         img_r = trick.tensor_to_arr(i_r)[0]
-#         return img_r
-#
-# elif fs_model_name == 'simswap_triplet' or fs_model_name == 'simswap_vanilla':
-#     from modules.networks.simswap import Generator_Adain_Upsample
-#     sw_model = Generator_Adain_Upsample(
-#         input_nc=3, output_nc=3, latent_size=512, n_blocks=9, deep=False,
-#         mouth_net_param=mouth_net_param
-#     )
-#     if fs_model_name == 'simswap_triplet':
-#         pt_path = make_abs_path("../ffplus/extracted_ckpt/G_mouth1_st5.pth")
-#         ckpt_path = make_abs_path("/apdcephfs/share_1290939/gavinyuan/out/"
-#                                   "simswap_triplet_5/epoch=12-step=782999.ckpt")
-#     elif fs_model_name == 'simswap_vanilla':
-#         pt_path = make_abs_path("../ffplus/extracted_ckpt/G_tmp_sv4_off.pth")
-#         ckpt_path = make_abs_path("/apdcephfs/share_1290939/gavinyuan/out/"
-#                                   "simswap_vanilla_4/epoch=694-step=1487999.ckpt")
-#     else:
-#         pt_path = None
-#         ckpt_path = None
-#     sw_model.load_state_dict(torch.load(pt_path, "cpu"), strict=False)
-#     sw_model.eval()
-#     fs_model = sw_model
-#
-#     from trainer.simswap.simswap_pl import SimSwapPL
-#     import yaml
-#     with open(make_abs_path('../../trainer/simswap/config.yaml'), 'r') as f:
-#         config = yaml.load(f, Loader=yaml.FullLoader)
-#     config['mouth_net'] = mouth_net_param
-#     net = SimSwapPL(config=config, use_official_arc='off' in pt_path)
-#
-#     checkpoint = torch.load(ckpt_path, map_location="cpu")
-#     net.load_state_dict(checkpoint["state_dict"], strict=False)
-#     net.eval()
-#     sw_mouth_net = net.mouth_net  # maybe None
-#     sw_netArc = net.netArc
-#     fs_model = fs_model.cuda()
-#     sw_mouth_net = sw_mouth_net.cuda() if sw_mouth_net is not None else sw_mouth_net
-#     sw_netArc = sw_netArc.cuda()
-#
-#     @torch.no_grad()
-#     def infer_batch_to_img(i_s, i_t, post: bool = False):
-#         i_r = fs_model(source=i_s, target=i_t, net_arc=sw_netArc, mouth_net=sw_mouth_net,)
-#         if post:
-#             target_hair_mask = trick.get_any_mask(i_t, par=[0, 17])
-#             target_hair_mask = trick.smooth_mask(target_hair_mask)
-#             i_r = target_hair_mask * i_t + (target_hair_mask * (-1) + 1) * i_r
-#         i_r = i_r.clamp(-1, 1)
-#         i_r = trick.tensor_to_arr(i_r)[0]
-#         return i_r
-#
-# elif fs_model_name == 'simswap_official':
-#     from simswap.image_infer import SimSwapOfficialImageInfer
-#     fs_model = SimSwapOfficialImageInfer()
-#     pt_path = 'Simswap Official'
-#     mouth_net_param = {
-#         "use": False
-#     }
-#
-#     @torch.no_grad()
-#     def infer_batch_to_img(i_s, i_t):
-#         i_r = fs_model.image_infer(source_tensor=i_s, target_tensor=i_t)
-#         i_r = i_r.clamp(-1, 1)
-#         return i_r
-#
-# else:
-#     raise ValueError('Not supported fs_model_name.')
-#
-#
-# print(f'[demo] model loaded from {pt_path}')
+make_abs_path = lambda fn: os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), fn))
+
+
+fs_model_name = 'faceshifter'
+in_size = 256
+
+mouth_net_param = {
+    "use": True,
+    "feature_dim": 128,
+    "crop_param": (28, 56, 84, 112),
+    "weight_path": "../../modules/third_party/arcface/weights/mouth_net_28_56_84_112.pth",
+}
+trick = Trick()
+
+T = transforms.Compose(
+    [
+        transforms.ToTensor(),
+        transforms.Normalize(0.5, 0.5),
+    ]
+)
+tensor2pil_transform = transforms.ToPILImage()
+
+
+def extract_generator(ckpt: str, pt: str):
+    print(f'[extract_generator] loading ckpt...')
+    from trainer.faceshifter.faceshifter_pl import FaceshifterPL512, FaceshifterPL
+    import yaml
+    with open(make_abs_path('../../trainer/faceshifter/config.yaml'), 'r') as f:
+        config = yaml.load(f, Loader=yaml.FullLoader)
+    config['mouth_net'] = mouth_net_param
+
+    if in_size == 256:
+        net = FaceshifterPL(n_layers=3, num_D=3, config=config)
+    elif in_size == 512:
+        net = FaceshifterPL512(n_layers=3, num_D=3, config=config, verbose=False)
+    else:
+        raise ValueError('Not supported in_size.')
+    checkpoint = torch.load(ckpt, map_location="cpu", )
+    net.load_state_dict(checkpoint["state_dict"], strict=False)
+    net.eval()
+
+    G = net.generator
+    torch.save(G.state_dict(), pt)
+    print(f'[extract_generator] extracted from {ckpt}, pth saved to {pt}')
+
+
+''' load model '''
+if fs_model_name == 'faceshifter':
+    pt_path = make_abs_path("./weights/extracted/G_mouth1_t38.pth")
+    # pt_path = make_abs_path("../ffplus/extracted_ckpt/G_mouth1_t512_6.pth")
+    # ckpt_path = "/apdcephfs/share_1290939/gavinyuan/out/triplet512_6/epoch=3-step=128999.ckpt"
+    # pt_path = make_abs_path("../ffplus/extracted_ckpt/G_mouth1_t512_4.pth")
+    # ckpt_path = "/apdcephfs/share_1290939/gavinyuan/out/triplet512_4/epoch=2-step=185999.ckpt"
+    if not os.path.exists(pt_path) or 't512' in pt_path:
+        extract_generator(ckpt_path, pt_path)
+    fs_model = FSGenerator(
+        make_abs_path("./weights/arcface/ms1mv3_arcface_r100_fp16/backbone.pth"),
+        mouth_net_param=mouth_net_param,
+        in_size=in_size,
+        downup=in_size == 512,
+    )
+    fs_model.load_state_dict(torch.load(pt_path, "cpu"), strict=True)
+    fs_model.eval()
+
+    @torch.no_grad()
+    def infer_batch_to_img(i_s, i_t, post: bool = False):
+        i_r = fs_model(i_s, i_t)[0]  # x, id_vector, att
+
+        if post:
+            target_hair_mask = trick.get_any_mask(i_t, par=[0, 17])
+            target_hair_mask = trick.smooth_mask(target_hair_mask)
+            i_r = target_hair_mask * i_t + (target_hair_mask * (-1) + 1) * i_r
+            i_r = trick.finetune_mouth(i_s, i_t, i_r) if in_size == 256 else i_r
+
+        img_r = trick.tensor_to_arr(i_r)[0]
+        return img_r
+
+elif fs_model_name == 'simswap_triplet' or fs_model_name == 'simswap_vanilla':
+    from modules.networks.simswap import Generator_Adain_Upsample
+    sw_model = Generator_Adain_Upsample(
+        input_nc=3, output_nc=3, latent_size=512, n_blocks=9, deep=False,
+        mouth_net_param=mouth_net_param
+    )
+    if fs_model_name == 'simswap_triplet':
+        pt_path = make_abs_path("../ffplus/extracted_ckpt/G_mouth1_st5.pth")
+        ckpt_path = make_abs_path("/apdcephfs/share_1290939/gavinyuan/out/"
+                                  "simswap_triplet_5/epoch=12-step=782999.ckpt")
+    elif fs_model_name == 'simswap_vanilla':
+        pt_path = make_abs_path("../ffplus/extracted_ckpt/G_tmp_sv4_off.pth")
+        ckpt_path = make_abs_path("/apdcephfs/share_1290939/gavinyuan/out/"
+                                  "simswap_vanilla_4/epoch=694-step=1487999.ckpt")
+    else:
+        pt_path = None
+        ckpt_path = None
+    sw_model.load_state_dict(torch.load(pt_path, "cpu"), strict=False)
+    sw_model.eval()
+    fs_model = sw_model
+
+    from trainer.simswap.simswap_pl import SimSwapPL
+    import yaml
+    with open(make_abs_path('../../trainer/simswap/config.yaml'), 'r') as f:
+        config = yaml.load(f, Loader=yaml.FullLoader)
+    config['mouth_net'] = mouth_net_param
+    net = SimSwapPL(config=config, use_official_arc='off' in pt_path)
+
+    checkpoint = torch.load(ckpt_path, map_location="cpu")
+    net.load_state_dict(checkpoint["state_dict"], strict=False)
+    net.eval()
+    sw_mouth_net = net.mouth_net  # maybe None
+    sw_netArc = net.netArc
+    fs_model = fs_model.cuda()
+    sw_mouth_net = sw_mouth_net.cuda() if sw_mouth_net is not None else sw_mouth_net
+    sw_netArc = sw_netArc.cuda()
+
+    @torch.no_grad()
+    def infer_batch_to_img(i_s, i_t, post: bool = False):
+        i_r = fs_model(source=i_s, target=i_t, net_arc=sw_netArc, mouth_net=sw_mouth_net,)
+        if post:
+            target_hair_mask = trick.get_any_mask(i_t, par=[0, 17])
+            target_hair_mask = trick.smooth_mask(target_hair_mask)
+            i_r = target_hair_mask * i_t + (target_hair_mask * (-1) + 1) * i_r
+        i_r = i_r.clamp(-1, 1)
+        i_r = trick.tensor_to_arr(i_r)[0]
+        return i_r
+
+elif fs_model_name == 'simswap_official':
+    from simswap.image_infer import SimSwapOfficialImageInfer
+    fs_model = SimSwapOfficialImageInfer()
+    pt_path = 'Simswap Official'
+    mouth_net_param = {
+        "use": False
+    }
+
+    @torch.no_grad()
+    def infer_batch_to_img(i_s, i_t):
+        i_r = fs_model.image_infer(source_tensor=i_s, target_tensor=i_t)
+        i_r = i_r.clamp(-1, 1)
+        return i_r
+
+else:
+    raise ValueError('Not supported fs_model_name.')
+
+
+print(f'[demo] model loaded from {pt_path}')
 
 
 def swap_image(
@@ -435,6 +435,7 @@ def swap_video_gr(img1, target_path, use_gpu=True, frames=9999999):
 
 
 if __name__ == "__main__":
+    use_gpu = torch.cuda.is_available()
     with gr.Blocks() as demo:
         gr.Markdown("SuperSwap")
 
@@ -458,12 +459,12 @@ if __name__ == "__main__":
         video_button = gr.Button("换脸")
         image_button.click(
            swap_image_gr,
-           inputs=[image1_input, image2_input, use_post, use_gpen],
+           inputs=[image1_input, image2_input, use_post, use_gpen, use_gpu],
           outputs=image_output,
        )
        video_button.click(
           swap_video_gr,
-          inputs=[image3_input, video_input],
+          inputs=[image3_input, video_input, use_gpu],
          outputs=video_output,
        )
 
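The "gpu or cpu mode" change comes down to one pattern; a minimal sketch (only use_gpu appears in the diff, the other names are illustrative):

import torch

# Probe the hardware once at startup, derive a single device object, and pass
# it down instead of hard-coding .cuda() -- the same idea the diff applies in
# inference/tricks.py and third_party/GPEN/infer_image.py.
use_gpu = torch.cuda.is_available()
device = torch.device(0) if use_gpu else torch.device("cpu")

x = torch.randn(1, 3, 256, 256).to(device)  # lands on GPU 0 when available, else CPU
print(device, x.device)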
inference/alignment.py ADDED
@@ -0,0 +1,245 @@
import cv2
import numpy as np
from skimage import transform as trans


def get_center(points):
    x = [p[0] for p in points]
    y = [p[1] for p in points]
    centroid = (sum(x) / len(points), sum(y) / len(points))
    return np.array([centroid])


def extract_five_lmk(lmk):
    x = lmk[..., :2]
    left_eye = get_center(x[36:42])
    right_eye = get_center(x[42:48])
    nose = x[30:31]
    left_mouth = x[48:49]
    right_mouth = x[54:55]
    x = np.concatenate([left_eye, right_eye, nose, left_mouth, right_mouth], axis=0)
    return x


set1 = np.array(
    [
        [41.125, 50.75],
        [71.75, 49.4375],
        [49.875, 73.0625],
        [45.9375, 87.9375],
        [70.4375, 87.9375],
    ],
    dtype=np.float32,
)

arcface_src = np.array(
    [
        [38.2946, 51.6963],
        [73.5318, 51.5014],
        [56.0252, 71.7366],
        [41.5493, 92.3655],
        [70.7299, 92.2041],
    ],
    dtype=np.float32,
)


ffhq = np.array(
    [
        [192.98138, 239.94708],
        [318.90277, 240.1936],
        [256.63416, 314.01935],
        [201.26117, 371.41043],
        [313.08905, 371.15118],
    ],
    dtype=np.float32,
)

mtcnn = np.array(
    [
        [40.95041, 52.341854],
        [70.90203, 52.17619],
        [56.02142, 69.376114],
        [43.716904, 86.910675],
        [68.52042, 86.77348],
    ],
    dtype=np.float32,
)

arcface_src = np.expand_dims(arcface_src, axis=0)
set1 = np.expand_dims(set1, axis=0)
ffhq = np.expand_dims(ffhq, axis=0)
mtcnn = np.expand_dims(mtcnn, axis=0)


# lmk is prediction; src is template
def estimate_norm(lmk, image_size=112, mode="set1"):
    assert lmk.shape == (5, 2)
    tform = trans.SimilarityTransform()
    lmk_tran = np.insert(lmk, 2, values=np.ones(5), axis=1)
    min_M = []
    min_index = []
    min_error = float("inf")
    if mode == "arcface":
        if image_size == 112:
            src = arcface_src
        else:
            src = float(image_size) / 112 * arcface_src
    elif mode == "set1":
        if image_size == 112:
            src = set1
        else:
            src = float(image_size) / 112 * set1
    elif mode == "ffhq":
        if image_size == 512:
            src = ffhq
        else:
            src = float(image_size) / 512 * ffhq
    elif mode == "mtcnn":
        if image_size == 112:
            src = mtcnn
        else:
            src = float(image_size) / 112 * mtcnn
    else:
        print("no mode like {}".format(mode))
        exit()
    for i in np.arange(src.shape[0]):
        tform.estimate(lmk, src[i])
        M = tform.params[0:2, :]
        results = np.dot(M, lmk_tran.T)
        results = results.T
        error = np.sum(np.sqrt(np.sum((results - src[i]) ** 2, axis=1)))
        # print(error)
        if error < min_error:
            min_error = error
            min_M = M
            min_index = i
    return min_M, min_index


def estimate_norm_any(lmk_from, lmk_to, image_size=112):
    tform = trans.SimilarityTransform()
    lmk_tran = np.insert(lmk_from, 2, values=np.ones(5), axis=1)
    min_M = []
    min_index = []
    min_error = float("inf")
    src = lmk_to[np.newaxis, ...]
    for i in np.arange(src.shape[0]):
        tform.estimate(lmk_from, src[i])
        M = tform.params[0:2, :]
        results = np.dot(M, lmk_tran.T)
        results = results.T
        error = np.sum(np.sqrt(np.sum((results - src[i]) ** 2, axis=1)))
        # print(error)
        if error < min_error:
            min_error = error
            min_M = M
            min_index = i
    return min_M, min_index


def norm_crop(img, landmark, image_size=112, mode="arcface", borderValue=0.0):
    M, pose_index = estimate_norm(landmark, image_size, mode)
    warped = cv2.warpAffine(img, M, (image_size, image_size), borderValue=borderValue)
    return warped


def norm_crop_with_M(img, landmark, image_size=112, mode="arcface", borderValue=0.0):
    M, pose_index = estimate_norm(landmark, image_size, mode)
    warped = cv2.warpAffine(img, M, (image_size, image_size), borderValue=borderValue)
    return warped, M


def square_crop(im, S):
    if im.shape[0] > im.shape[1]:
        height = S
        width = int(float(im.shape[1]) / im.shape[0] * S)
        scale = float(S) / im.shape[0]
    else:
        width = S
        height = int(float(im.shape[0]) / im.shape[1] * S)
        scale = float(S) / im.shape[1]
    resized_im = cv2.resize(im, (width, height))
    det_im = np.zeros((S, S, 3), dtype=np.uint8)
    det_im[: resized_im.shape[0], : resized_im.shape[1], :] = resized_im
    return det_im, scale


def transform(data, center, output_size, scale, rotation):
    scale_ratio = scale
    rot = float(rotation) * np.pi / 180.0
    # translation = (output_size/2-center[0]*scale_ratio, output_size/2-center[1]*scale_ratio)
    t1 = trans.SimilarityTransform(scale=scale_ratio)
    cx = center[0] * scale_ratio
    cy = center[1] * scale_ratio
    t2 = trans.SimilarityTransform(translation=(-1 * cx, -1 * cy))
    t3 = trans.SimilarityTransform(rotation=rot)
    t4 = trans.SimilarityTransform(translation=(output_size / 2, output_size / 2))
    t = t1 + t2 + t3 + t4
    M = t.params[0:2]
    cropped = cv2.warpAffine(data, M, (output_size, output_size), borderValue=0.0)
    return cropped, M


def trans_points2d(pts, M):
    new_pts = np.zeros(shape=pts.shape, dtype=np.float32)
    for i in range(pts.shape[0]):
        pt = pts[i]
        new_pt = np.array([pt[0], pt[1], 1.0], dtype=np.float32)
        new_pt = np.dot(M, new_pt)
        # print('new_pt', new_pt.shape, new_pt)
        new_pts[i] = new_pt[0:2]

    return new_pts


def trans_points3d(pts, M):
    scale = np.sqrt(M[0][0] * M[0][0] + M[0][1] * M[0][1])
    # print(scale)
    new_pts = np.zeros(shape=pts.shape, dtype=np.float32)
    for i in range(pts.shape[0]):
        pt = pts[i]
        new_pt = np.array([pt[0], pt[1], 1.0], dtype=np.float32)
        new_pt = np.dot(M, new_pt)
        # print('new_pt', new_pt.shape, new_pt)
        new_pts[i][0:2] = new_pt[0:2]
        new_pts[i][2] = pts[i][2] * scale

    return new_pts


def trans_points(pts, M):
    if pts.shape[1] == 2:
        return trans_points2d(pts, M)
    else:
        return trans_points3d(pts, M)


def paste_back(img, mat, ori_img):
    mat_rev = np.zeros([2, 3])
    div1 = mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]
    mat_rev[0][0] = mat[1][1] / div1
    mat_rev[0][1] = -mat[0][1] / div1
    mat_rev[0][2] = -(mat[0][2] * mat[1][1] - mat[0][1] * mat[1][2]) / div1
    div2 = mat[0][1] * mat[1][0] - mat[0][0] * mat[1][1]
    mat_rev[1][0] = mat[1][0] / div2
    mat_rev[1][1] = -mat[0][0] / div2
    mat_rev[1][2] = -(mat[0][2] * mat[1][0] - mat[0][0] * mat[1][2]) / div2

    img_shape = (ori_img.shape[1], ori_img.shape[0])

    img = cv2.warpAffine(img, mat_rev, img_shape)
    img_white = np.full((256, 256), 255, dtype=float)
    img_white = cv2.warpAffine(img_white, mat_rev, img_shape)
    img_white[img_white > 20] = 255
    img_mask = img_white
    kernel = np.ones((40, 40), np.uint8)
    img_mask = cv2.erode(img_mask, kernel, iterations=2)
    kernel_size = (20, 20)
    blur_size = tuple(2 * j + 1 for j in kernel_size)
    img_mask = cv2.GaussianBlur(img_mask, blur_size, 0)
    img_mask /= 255
    img_mask = np.reshape(img_mask, [img_mask.shape[0], img_mask.shape[1], 1])
    ori_img = img_mask * img + (1 - img_mask) * ori_img
    ori_img = ori_img.astype(np.uint8)
    return ori_img
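paste_back() inverts the 2x3 crop matrix by hand. For readers checking that algebra, a small sketch (the sample matrix is arbitrary; a non-degenerate affine is assumed) confirming it matches OpenCV's built-in inverse:

import cv2
import numpy as np

mat = cv2.getRotationMatrix2D((128.0, 128.0), 30.0, 0.8)  # arbitrary 2x3 affine

# Reproduce paste_back's manual inversion for comparison.
manual = np.zeros([2, 3])
div1 = mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]
manual[0][0] = mat[1][1] / div1
manual[0][1] = -mat[0][1] / div1
manual[0][2] = -(mat[0][2] * mat[1][1] - mat[0][1] * mat[1][2]) / div1
div2 = mat[0][1] * mat[1][0] - mat[0][0] * mat[1][1]
manual[1][0] = mat[1][0] / div2
manual[1][1] = -mat[0][0] / div2
manual[1][2] = -(mat[0][2] * mat[1][0] - mat[0][0] * mat[1][2]) / div2

assert np.allclose(manual, cv2.invertAffineTransform(mat))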
inference/landmark_smooth.py ADDED
@@ -0,0 +1,117 @@
import cv2
import numpy as np
from scipy.signal import savgol_filter

def kalman_filter(inputs: np.array,
                  process_noise: float = 0.03,
                  measure_noise: float = 0.01,
                  ):
    """ OpenCV - Kalman Filter
    https://blog.csdn.net/angelfish91/article/details/61768575
    https://blog.csdn.net/qq_23981335/article/details/82968422
    """
    assert inputs.ndim == 2, "inputs should be 2-dim np.array"

    '''
    cv2.KalmanFilter takes three arguments:
    dynam_params: dimensionality of the state space, 2 here;
    measure_params: dimensionality of the measurement, also 2 here;
    control_params: dimensionality of the control vector, 0 by default
    (this model has no control variable, so it stays 0).
    '''
    kalman = cv2.KalmanFilter(2, 2)

    kalman.measurementMatrix = np.array([[1, 0], [0, 1]], np.float32)
    kalman.transitionMatrix = np.array([[1, 0], [0, 1]], np.float32)
    kalman.processNoiseCov = np.array([[1, 0], [0, 1]], np.float32) * process_noise
    kalman.measurementNoiseCov = np.array([[1, 0], [0, 1]], np.float32) * measure_noise
    '''
    kalman.measurementNoiseCov is the measurement covariance: the smaller it is,
    the closer the predictions track the measurements.
    kalman.processNoiseCov is the process (model) noise: the larger it is, the
    less stable the predictions, the closer they follow the model's own
    estimates, and the bigger the per-step changes; conversely, with small
    noise each prediction stays close to the previous result.
    '''

    kalman.statePre = np.array([[inputs[0][0]],
                                [inputs[0][1]]])

    '''
    Kalman Filtering
    '''
    outputs = np.zeros_like(inputs)
    for i in range(len(inputs)):
        mes = np.reshape(inputs[i, :], (2, 1))

        x = kalman.correct(mes)

        y = kalman.predict()
        outputs[i] = np.squeeze(y)
        # print(kalman.statePost[0], kalman.statePost[1])
        # print(kalman.statePre[0], kalman.statePre[1])
        # print('measurement:\t', mes[0], mes[1])
        # print('correct:\t', x[0], x[1])
        # print('predict:\t', y[0], y[1])
        # print('=' * 30)

    return outputs


def kalman_filter_landmark(landmarks: np.array,
                           process_noise: float = 0.03,
                           measure_noise: float = 0.01,
                           ):
    """ Kalman Filter for Landmarks
    :param process_noise: large means unstable and close to model predictions
    :param measure_noise: small means close to measurement
    """
    print('[Using Kalman Filter for Landmark Smoothing, process_noise=%f, measure_noise=%f]' %
          (process_noise, measure_noise))

    '''
    landmarks: (#frames, key, xy)
    '''
    assert landmarks.ndim == 3, 'landmarks should be 3-dim np.array'
    assert landmarks.dtype == 'float32', 'landmarks dtype should be float32'

    for s1 in range(landmarks.shape[1]):
        landmarks[:, s1] = kalman_filter(landmarks[:, s1],
                                         process_noise,
                                         measure_noise)
    return landmarks


def savgol_filter_landmark(landmarks: np.array,
                           window_length: int = 25,
                           poly_order: int = 2,
                           ):
    """ Savgol Filter for Landmarks
    https://blog.csdn.net/kaever/article/details/105520941
    """
    print('[Using Savgol Filter for Landmark Smoothing, window_length=%d, poly_order=%d]' %
          (window_length, poly_order))

    '''
    landmarks: (#frames, key, xy)
    '''
    assert landmarks.ndim == 3, 'landmarks should be 3-dim np.array'
    assert landmarks.dtype == 'float32', 'landmarks dtype should be float32'
    assert window_length % 2 == 1, 'window_length should be odd'

    for s1 in range(landmarks.shape[1]):
        for s2 in range(landmarks.shape[2]):
            landmarks[:, s1, s2] = savgol_filter(landmarks[:, s1, s2],
                                                 window_length,
                                                 poly_order)
    return landmarks

if __name__ == '__main__':

    pos = np.array([
        [10, 50],
        [12, 49],
        [11, 52],
        [13, 52.2],
        [12.9, 50]], np.float32)

    print(pos)
    pos_filtered = kalman_filter(pos)
    print(pos)
    print(pos_filtered)
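The __main__ block only demos the Kalman path; a matching sketch for savgol_filter_landmark on synthetic data (shapes assumed: 100 frames of 5 keypoints), mirroring its per-coordinate loop:

import numpy as np
from scipy.signal import savgol_filter

track = np.cumsum(np.random.randn(100, 5, 2), axis=0).astype('float32')  # jittery random walk
smoothed = track.copy()
for k in range(track.shape[1]):       # each keypoint
    for d in range(track.shape[2]):   # x and y separately
        smoothed[:, k, d] = savgol_filter(track[:, k, d], 25, 2)  # window 25 (odd), order 2
print(smoothed.shape)  # (100, 5, 2)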
inference/tricks.py CHANGED
@@ -74,7 +74,7 @@ class Trick(object):
         if not use_gpen:
             return img_np
         if self.gpen_model is None:
-            self.gpen_model = GPENImageInfer()
+            self.gpen_model = GPENImageInfer(device=global_device)
         img_np = self.gpen_model.image_infer(img_np)
         return img_np
 
@@ -139,22 +139,22 @@ class SoftErosion(nn.Module):
 
 
 if torch.cuda.is_available():
-    device = torch.device(0)
+    global_device = torch.device(0)
 else:
-    device = torch.device('cpu')
+    global_device = torch.device('cpu')
 vgg_mean = torch.tensor([[[0.485]], [[0.456]], [[0.406]]],
-                        requires_grad=False, device=device)
+                        requires_grad=False, device=global_device)
 vgg_std = torch.tensor([[[0.229]], [[0.224]], [[0.225]]],
-                       requires_grad=False, device=device)
+                       requires_grad=False, device=global_device)
 def load_bisenet():
     bisenet_model = BiSeNet(n_classes=19)
     bisenet_model.load_state_dict(
-        torch.load(make_abs_path("../weights/79999_iter.pth",), map_location="cpu")
+        torch.load(make_abs_path("../weights/bisenet/79999_iter.pth",), map_location="cpu")
     )
     bisenet_model.eval()
-    bisenet_model = bisenet_model.to(device)
+    bisenet_model = bisenet_model.to(global_device)
 
-    smooth_mask = SoftErosion(kernel_size=17, threshold=0.9, iterations=7).to(device)
+    smooth_mask = SoftErosion(kernel_size=17, threshold=0.9, iterations=7).to(global_device)
     print('[Global] bisenet loaded.')
     return bisenet_model, smooth_mask
 
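For context on what the Trick masks feed in app.py: the post step is a plain convex blend that keeps the target's hair and background. A minimal sketch with assumed tensor shapes:

import torch

i_t = torch.rand(1, 3, 256, 256) * 2 - 1   # target frame, in [-1, 1]
i_r = torch.rand(1, 3, 256, 256) * 2 - 1   # raw swap result, in [-1, 1]
mask = torch.rand(1, 1, 256, 256)          # soft hair/background mask in [0, 1]

# Equivalent to app.py's mask * i_t + (mask * (-1) + 1) * i_r
i_out = mask * i_t + (1 - mask) * i_r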
inference/utils.py ADDED
@@ -0,0 +1,133 @@
import numpy as np
import cv2
from PIL import Image
# from TDDFA_V2.FaceBoxes import FaceBoxes
# from TDDFA_V2.TDDFA import TDDFA

def get_5_from_98(lmk):
    lefteye = (lmk[60] + lmk[64] + lmk[96]) / 3  # lmk[96]
    righteye = (lmk[68] + lmk[72] + lmk[97]) / 3  # lmk[97]
    nose = lmk[54]
    leftmouth = lmk[76]
    rightmouth = lmk[82]
    return np.array([lefteye, righteye, nose, leftmouth, rightmouth])


def get_center(points):
    x = [p[0] for p in points]
    y = [p[1] for p in points]
    centroid = (sum(x) / len(points), sum(y) / len(points))
    return np.array([centroid])


def get_lmk(img, tddfa, face_boxes):
    # only accepts an image containing a single person
    boxes = face_boxes(img)
    n = len(boxes)
    if n < 1:
        return None
    param_lst, roi_box_lst = tddfa(img, boxes)
    ver_lst = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=False)
    x = ver_lst[0].transpose(1, 0)[..., :2]
    left_eye = get_center(x[36:42])
    right_eye = get_center(x[42:48])
    nose = x[30:31]
    left_mouth = x[48:49]
    right_mouth = x[54:55]
    x = np.concatenate([left_eye, right_eye, nose, left_mouth, right_mouth], axis=0)
    return x


def get_landmark_once(img, gpu_mode=False):
    tddfa = TDDFA(
        gpu_mode=gpu_mode,
        arch="resnet",
        checkpoint_fp="./TDDFA_V2/weights/resnet22.pth",
        bfm_fp="TDDFA_V2/configs/bfm_noneck_v3.pkl",
        size=120,
        num_params=62,
    )
    face_boxes = FaceBoxes()
    boxes = face_boxes(img)
    n = len(boxes)
    if n < 1:
        return None
    param_lst, roi_box_lst = tddfa(img, boxes)
    ver_lst = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=False)
    x = ver_lst[0].transpose(1, 0)[..., :2]
    left_eye = get_center(x[36:42])
    right_eye = get_center(x[42:48])
    nose = x[30:31]
    left_mouth = x[48:49]
    right_mouth = x[54:55]
    x = np.concatenate([left_eye, right_eye, nose, left_mouth, right_mouth], axis=0)
    return x


def get_detector(gpu_mode=False):
    tddfa = TDDFA(
        gpu_mode=gpu_mode,
        arch="resnet",
        checkpoint_fp="./TDDFA_V2/weights/resnet22.pth",
        bfm_fp="TDDFA_V2/configs/bfm_noneck_v3.pkl",
        size=120,
        num_params=62,
    )
    face_boxes = FaceBoxes()
    return tddfa, face_boxes


def save(x, trick=None, use_post=False):
    """ Paste img to ori_img """
    img, mat, ori_img, save_path, img_mask = x
    if mat is None:
        print('[Warning] mat is None.')
        ori_img = ori_img.astype(np.uint8)
        Image.fromarray(ori_img).save(save_path)
        return

    H, W = img.shape[0], img.shape[1]  # (256,256) or (512,512)
    mat_rev = np.zeros([2, 3])
    div1 = mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]
    mat_rev[0][0] = mat[1][1] / div1
    mat_rev[0][1] = -mat[0][1] / div1
    mat_rev[0][2] = -(mat[0][2] * mat[1][1] - mat[0][1] * mat[1][2]) / div1
    div2 = mat[0][1] * mat[1][0] - mat[0][0] * mat[1][1]
    mat_rev[1][0] = mat[1][0] / div2
    mat_rev[1][1] = -mat[0][0] / div2
    mat_rev[1][2] = -(mat[0][2] * mat[1][0] - mat[0][0] * mat[1][2]) / div2

    img_shape = (ori_img.shape[1], ori_img.shape[0])  # (w,h)

    img = cv2.warpAffine(img, mat_rev, img_shape)

    if img_mask is None:
        ''' hanbang version of paste masks '''
        img_white = np.full((H, W), 255, dtype=float)
        img_white = cv2.warpAffine(img_white, mat_rev, img_shape)
        img_white[img_white > 20] = 255
        img_mask = img_white

        kernel = np.ones((40, 40), np.uint8)
        img_mask = cv2.erode(img_mask, kernel, iterations=2)

        kernel_size = (20, 20)
        blur_size = tuple(2 * j + 1 for j in kernel_size)
        img_mask = cv2.GaussianBlur(img_mask, blur_size, 0)
        img_mask /= 255
        img_mask = np.reshape(img_mask, [img_mask.shape[0], img_mask.shape[1], 1])
    else:
        ''' yuange version of paste masks '''
        img_mask = cv2.warpAffine(img_mask, mat_rev, img_shape)
        img_mask = np.expand_dims(img_mask, axis=-1)

    ori_img = img_mask * img + (1 - img_mask) * ori_img
    ori_img = ori_img.astype(np.uint8)

    if trick is not None:
        ori_img = trick.gpen(ori_img, use_post)

    Image.fromarray(ori_img).save(save_path)

    # img_mask = np.array((img_mask * 255), dtype=np.uint8).squeeze()
    # Image.fromarray(img_mask).save('img_mask.jpg')
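A quick worked check of get_5_from_98's index mapping on a dummy array (values are synthetic):

import numpy as np

lmk98 = np.arange(98 * 2, dtype=np.float32).reshape(98, 2)
five = np.array([
    (lmk98[60] + lmk98[64] + lmk98[96]) / 3,  # left-eye center (3-point average)
    (lmk98[68] + lmk98[72] + lmk98[97]) / 3,  # right-eye center
    lmk98[54],                                # nose tip
    lmk98[76],                                # left mouth corner
    lmk98[82],                                # right mouth corner
])
print(five.shape)  # (5, 2) -- the 5-point format estimate_norm() expects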
third_party/GPEN/infer_image.py CHANGED
@@ -14,7 +14,7 @@ make_abs_path = lambda fn: os.path.abspath(os.path.join(os.path.dirname(os.path.
 
 
 class GPENImageInfer(object):
-    def __init__(self):
+    def __init__(self, device):
         super(GPENImageInfer, self).__init__()
 
         model = {
@@ -32,6 +32,7 @@ class GPENImageInfer(object):
             model=model["name"],
             channel_multiplier=model["channel_multiplier"],
             narrow=model["narrow"],
+            device=device,
         )
         self.faceenhancer = faceenhancer
 
@@ -77,6 +78,7 @@ class GPENImageInfer(object):
         :return: out_batch: (N,RGB,H,W), in [-1,1]
         """
         B, C, H, W = in_batch.shape
+        device = in_batch.device
 
         in_batch = ((in_batch + 1.) * 127.5).permute(0, 2, 3, 1)
         in_batch = in_batch.cpu().numpy().astype(np.uint8)  # (N,H,W,RGB), in [0,255]
@@ -89,7 +91,7 @@ class GPENImageInfer(object):
             out_batch[b_idx] = out_img[:, :, ::-1]
             if save_batch_idx is not None and b_idx == save_batch_idx:
                 cv2.imwrite(os.path.join(save_folder, save_name), out_img)
-        out_batch = torch.FloatTensor(out_batch).cuda()
+        out_batch = torch.FloatTensor(out_batch).to(device)
         out_batch = out_batch / 127.5 - 1.  # (N,H,W,RGB)
         out_batch = out_batch.permute(0, 3, 1, 2)  # (N,RGB,H,W)
         out_batch = out_batch.clamp(-1, 1)
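The value-range round-trip that image_infer performs around the enhancer, isolated (ranges taken from its own comments; the tensors here are random stand-ins):

import torch

in_batch = torch.rand(2, 3, 128, 128) * 2 - 1  # (N,RGB,H,W) in [-1,1]
as_uint8 = ((in_batch + 1.) * 127.5).permute(0, 2, 3, 1).to(torch.uint8)  # (N,H,W,RGB) in [0,255]
back = as_uint8.float() / 127.5 - 1.           # back to (N,H,W,RGB) in [-1,1]
assert back.min() >= -1 and back.max() <= 1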
third_party/PIPNet/FaceBoxesV2/detector.py ADDED
@@ -0,0 +1,39 @@
import cv2

class Detector(object):
    def __init__(self, model_arch, model_weights):
        self.model_arch = model_arch
        self.model_weights = model_weights

    def detect(self, image, thresh):
        raise NotImplementedError

    def crop(self, image, detections):
        crops = []
        for det in detections:
            xmin = max(det[2], 0)
            ymin = max(det[3], 0)
            width = det[4]
            height = det[5]
            xmax = min(xmin + width, image.shape[1])
            ymax = min(ymin + height, image.shape[0])
            cut = image[ymin:ymax, xmin:xmax, :]
            crops.append(cut)

        return crops

    def draw(self, image, detections, im_scale=None):
        if im_scale is not None:
            image = cv2.resize(image, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
            detections = [[det[0], det[1], int(det[2] * im_scale), int(det[3] * im_scale), int(det[4] * im_scale), int(det[5] * im_scale)] for det in detections]

        for det in detections:
            xmin = det[2]
            ymin = det[3]
            width = det[4]
            height = det[5]
            xmax = xmin + width
            ymax = ymin + height
            cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 0, 255), 2)

        return image
third_party/PIPNet/FaceBoxesV2/faceboxes_detector.py ADDED
@@ -0,0 +1,124 @@
from third_party.PIPNet.FaceBoxesV2.detector import Detector
import cv2, os
import numpy as np
import torch
import torch.nn as nn
from third_party.PIPNet.FaceBoxesV2.utils.config import cfg
from third_party.PIPNet.FaceBoxesV2.utils.prior_box import PriorBox
from third_party.PIPNet.FaceBoxesV2.utils.nms_wrapper import nms
from third_party.PIPNet.FaceBoxesV2.utils.faceboxes import FaceBoxesV2
from third_party.PIPNet.FaceBoxesV2.utils.box_utils import decode
import time


class FaceBoxesDetector(Detector):
    def __init__(self, model_arch, model_weights, use_gpu, device):
        super().__init__(model_arch, model_weights)
        self.name = "FaceBoxesDetector"
        self.net = FaceBoxesV2(
            phase="test", size=None, num_classes=2
        )  # initialize detector
        self.use_gpu = use_gpu
        self.device = device

        state_dict = torch.load(self.model_weights, map_location=self.device)
        # create new OrderedDict that does not contain `module.`
        from collections import OrderedDict

        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            name = k[7:]  # remove `module.`
            new_state_dict[name] = v
        # load params
        self.net.load_state_dict(new_state_dict)
        self.net = self.net.to(self.device)
        self.net.eval()

    def detect(self, image, thresh=0.6, im_scale=None):
        # auto resize for large images
        if im_scale is None:
            height, width, _ = image.shape
            if min(height, width) > 600:
                im_scale = 600.0 / min(height, width)
            else:
                im_scale = 1
        image_scale = cv2.resize(
            image, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR
        )

        scale = torch.Tensor(
            [
                image_scale.shape[1],
                image_scale.shape[0],
                image_scale.shape[1],
                image_scale.shape[0],
            ]
        )
        image_scale = (
            torch.from_numpy(image_scale.transpose(2, 0, 1)).to(self.device).int()
        )
        mean_tmp = torch.IntTensor([104, 117, 123]).to(self.device)
        mean_tmp = mean_tmp.unsqueeze(1).unsqueeze(2)
        image_scale -= mean_tmp
        image_scale = image_scale.float().unsqueeze(0)
        scale = scale.to(self.device)

        with torch.no_grad():
            out = self.net(image_scale)
            # priorbox = PriorBox(cfg, out[2], (image_scale.size()[2], image_scale.size()[3]), phase='test')
            priorbox = PriorBox(
                cfg, image_size=(image_scale.size()[2], image_scale.size()[3])
            )
            priors = priorbox.forward()
            priors = priors.to(self.device)
            loc, conf = out
            prior_data = priors.data
            boxes = decode(loc.data.squeeze(0), prior_data, cfg["variance"])
            boxes = boxes * scale
            boxes = boxes.cpu().numpy()
            scores = conf.data.cpu().numpy()[:, 1]

            # ignore low scores
            inds = np.where(scores > thresh)[0]
            boxes = boxes[inds]
            scores = scores[inds]

            # keep top-K before NMS
            order = scores.argsort()[::-1][:5000]
            boxes = boxes[order]
            scores = scores[order]

            # do NMS
            dets = np.hstack((boxes, scores[:, np.newaxis])).astype(
                np.float32, copy=False
            )
            keep = nms(dets, 0.3)
            dets = dets[keep, :]

            dets = dets[:750, :]
            detections_scale = []
            for i in range(dets.shape[0]):
                xmin = int(dets[i][0])
                ymin = int(dets[i][1])
                xmax = int(dets[i][2])
                ymax = int(dets[i][3])
                score = dets[i][4]
                width = xmax - xmin
                height = ymax - ymin
                detections_scale.append(["face", score, xmin, ymin, width, height])

        # adapt bboxes to the original image size
        if len(detections_scale) > 0:
            detections_scale = [
                [
                    det[0],
                    det[1],
                    int(det[2] / im_scale),
                    int(det[3] / im_scale),
                    int(det[4] / im_scale),
                    int(det[5] / im_scale),
                ]
                for det in detections_scale
            ]

        return detections_scale, im_scale
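A hedged usage sketch: the weights path comes from this commit's file list, while the image path is a placeholder.

import cv2
import torch
from third_party.PIPNet.FaceBoxesV2.faceboxes_detector import FaceBoxesDetector

use_gpu = torch.cuda.is_available()
device = torch.device(0) if use_gpu else torch.device("cpu")
detector = FaceBoxesDetector("FaceBoxes", "weights/PIPNet/FaceBoxesV2.pth", use_gpu, device)

image = cv2.imread("example.jpg")  # BGR, matching the (104, 117, 123) mean subtraction above
detections, im_scale = detector.detect(image, thresh=0.6)
for label, score, xmin, ymin, width, height in detections:
    print(label, float(score), xmin, ymin, width, height)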
third_party/PIPNet/FaceBoxesV2/utils/__init__.py ADDED
File without changes
third_party/PIPNet/FaceBoxesV2/utils/box_utils.py ADDED
@@ -0,0 +1,276 @@
import torch
import numpy as np


def point_form(boxes):
    """ Convert prior_boxes to (xmin, ymin, xmax, ymax)
    representation for comparison to point form ground truth data.
    Args:
        boxes: (tensor) center-size default boxes from priorbox layers.
    Return:
        boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
    """
    return torch.cat((boxes[:, :2] - boxes[:, 2:]/2,      # xmin, ymin
                      boxes[:, :2] + boxes[:, 2:]/2), 1)  # xmax, ymax


def center_size(boxes):
    """ Convert prior_boxes to (cx, cy, w, h)
    representation for comparison to center-size form ground truth data.
    Args:
        boxes: (tensor) point_form boxes
    Return:
        boxes: (tensor) Converted (cx, cy, w, h) form of boxes.
    """
    # note: parentheses fixed so torch.cat receives a tuple of the two halves
    return torch.cat(((boxes[:, 2:] + boxes[:, :2])/2,   # cx, cy
                      boxes[:, 2:] - boxes[:, :2]), 1)   # w, h


def intersect(box_a, box_b):
    """ We resize both tensors to [A,B,2] without new malloc:
    [A,2] -> [A,1,2] -> [A,B,2]
    [B,2] -> [1,B,2] -> [A,B,2]
    Then we compute the area of intersect between box_a and box_b.
    Args:
      box_a: (tensor) bounding boxes, Shape: [A,4].
      box_b: (tensor) bounding boxes, Shape: [B,4].
    Return:
      (tensor) intersection area, Shape: [A,B].
    """
    A = box_a.size(0)
    B = box_b.size(0)
    max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
                       box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
    min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
                       box_b[:, :2].unsqueeze(0).expand(A, B, 2))
    inter = torch.clamp((max_xy - min_xy), min=0)
    return inter[:, :, 0] * inter[:, :, 1]


def jaccard(box_a, box_b):
    """Compute the jaccard overlap of two sets of boxes. The jaccard overlap
    is simply the intersection over union of two boxes. Here we operate on
    ground truth boxes and default boxes.
    E.g.:
        A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
    Args:
        box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
        box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
    Return:
        jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
    """
    inter = intersect(box_a, box_b)
    area_a = ((box_a[:, 2]-box_a[:, 0]) *
              (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter)  # [A,B]
    area_b = ((box_b[:, 2]-box_b[:, 0]) *
              (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter)  # [A,B]
    union = area_a + area_b - inter
    return inter / union  # [A,B]


def matrix_iou(a, b):
    """
    return iou of a and b, numpy version for data augmentation
    """
    lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
    rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])

    area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
    area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
    return area_i / (area_a[:, np.newaxis] + area_b - area_i)


def matrix_iof(a, b):
    """
    return iof of a and b, numpy version for data augmentation
    """
    lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
    rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])

    area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
    return area_i / np.maximum(area_a[:, np.newaxis], 1)


def match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx):
    """Match each prior box with the ground truth box of the highest jaccard
    overlap, encode the bounding boxes, then return the matched indices
    corresponding to both confidence and location preds.
    Args:
        threshold: (float) The overlap threshold used when matching boxes.
        truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors].
        priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
        variances: (tensor) Variances corresponding to each prior coord,
            Shape: [num_priors, 4].
        labels: (tensor) All the class labels for the image, Shape: [num_obj].
        loc_t: (tensor) Tensor to be filled w/ encoded location targets.
        conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
        idx: (int) current batch index
    Return:
        The matched indices corresponding to 1)location and 2)confidence preds.
    """
    # jaccard index
    overlaps = jaccard(
        truths,
        point_form(priors)
    )
    # (Bipartite Matching)
    # [1,num_objects] best prior for each ground truth
    best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)

    # ignore hard gt
    valid_gt_idx = best_prior_overlap[:, 0] >= 0.2
    best_prior_idx_filter = best_prior_idx[valid_gt_idx, :]
    if best_prior_idx_filter.shape[0] <= 0:
        loc_t[idx] = 0
        conf_t[idx] = 0
        return

    # [1,num_priors] best ground truth for each prior
    best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)
    best_truth_idx.squeeze_(0)
    best_truth_overlap.squeeze_(0)
    best_prior_idx.squeeze_(1)
    best_prior_idx_filter.squeeze_(1)
    best_prior_overlap.squeeze_(1)
    best_truth_overlap.index_fill_(0, best_prior_idx_filter, 2)  # ensure best prior
    # NoTODO refactor: index best_prior_idx with long tensor
    # ensure every gt matches with its prior of max overlap
    for j in range(best_prior_idx.size(0)):
        best_truth_idx[best_prior_idx[j]] = j
    matches = truths[best_truth_idx]           # Shape: [num_priors,4]
    conf = labels[best_truth_idx]              # Shape: [num_priors]
    conf[best_truth_overlap < threshold] = 0   # label as background
    loc = encode(matches, priors, variances)
    loc_t[idx] = loc    # [num_priors,4] encoded offsets to learn
    conf_t[idx] = conf  # [num_priors] top class label for each prior


def encode(matched, priors, variances):
    """Encode the variances from the priorbox layers into the ground truth boxes
    we have matched (based on jaccard overlap) with the prior boxes.
    Args:
        matched: (tensor) Coords of ground truth for each prior in point-form
            Shape: [num_priors, 4].
        priors: (tensor) Prior boxes in center-offset form
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        encoded boxes (tensor), Shape: [num_priors, 4]
    """

    # dist b/t match center and prior's center
    g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
    # encode variance
    g_cxcy /= (variances[0] * priors[:, 2:])
    # match wh / prior wh
    g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
    g_wh = torch.log(g_wh) / variances[1]
    # return target for smooth_l1_loss
    return torch.cat([g_cxcy, g_wh], 1)  # [num_priors,4]


# Adapted from https://github.com/Hakuyume/chainer-ssd
def decode(loc, priors, variances):
    """Decode locations from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        loc (tensor): location predictions for loc layers,
            Shape: [num_priors,4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded bounding box predictions
    """

    boxes = torch.cat((
        priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
        priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes


def log_sum_exp(x):
    """Utility function for computing log_sum_exp.
    This will be used to determine unaveraged confidence loss across
    all examples in a batch.
    Args:
        x (Variable(tensor)): conf_preds from conf layers
    """
    x_max = x.data.max()
    return torch.log(torch.sum(torch.exp(x-x_max), 1, keepdim=True)) + x_max


# Original author: Francisco Massa:
# https://github.com/fmassa/object-detection.torch
# Ported to PyTorch by Max deGroot (02/01/2017)
def nms(boxes, scores, overlap=0.5, top_k=200):
    """Apply non-maximum suppression at test time to avoid detecting too many
    overlapping bounding boxes for a given object.
    Args:
        boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
        scores: (tensor) The class pred scores for the img, Shape: [num_priors].
        overlap: (float) The overlap thresh for suppressing unnecessary boxes.
        top_k: (int) The maximum number of box preds to consider.
    Return:
        The indices of the kept boxes with respect to num_priors.
    """

    keep = torch.Tensor(scores.size(0)).fill_(0).long()
    if boxes.numel() == 0:
        return keep
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = torch.mul(x2 - x1, y2 - y1)
    v, idx = scores.sort(0)  # sort in ascending order
    # I = I[v >= 0.01]
    idx = idx[-top_k:]  # indices of the top-k largest vals
    xx1 = boxes.new()
    yy1 = boxes.new()
    xx2 = boxes.new()
    yy2 = boxes.new()
    w = boxes.new()
    h = boxes.new()

    # keep = torch.Tensor()
    count = 0
    while idx.numel() > 0:
        i = idx[-1]  # index of current largest val
        # keep.append(i)
        keep[count] = i
        count += 1
        if idx.size(0) == 1:
            break
        idx = idx[:-1]  # remove kept element from view
        # load bboxes of next highest vals
        torch.index_select(x1, 0, idx, out=xx1)
        torch.index_select(y1, 0, idx, out=yy1)
        torch.index_select(x2, 0, idx, out=xx2)
        torch.index_select(y2, 0, idx, out=yy2)
        # store element-wise max with next highest score
        xx1 = torch.clamp(xx1, min=x1[i])
        yy1 = torch.clamp(yy1, min=y1[i])
        xx2 = torch.clamp(xx2, max=x2[i])
        yy2 = torch.clamp(yy2, max=y2[i])
        w.resize_as_(xx2)
        h.resize_as_(yy2)
        w = xx2 - xx1
        h = yy2 - yy1
        # check sizes of xx1 and xx2.. after each iteration
        w = torch.clamp(w, min=0.0)
        h = torch.clamp(h, min=0.0)
        inter = w*h
        # IoU = i / (area(a) + area(b) - i)
        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas
        union = (rem_areas - inter) + area[i]
        IoU = inter/union  # store result in iou
        # keep only elements with an IoU <= overlap
        idx = idx[IoU.le(overlap)]
    return keep, count
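encode() and decode() are exact inverses for matched boxes, which makes a one-line sanity check possible:

import torch
from third_party.PIPNet.FaceBoxesV2.utils.box_utils import decode, encode

priors = torch.tensor([[0.5, 0.5, 0.2, 0.3]])     # one prior: (cx, cy, w, h)
truth = torch.tensor([[0.42, 0.40, 0.60, 0.62]])  # one gt box: (xmin, ymin, xmax, ymax)
variances = [0.1, 0.2]                            # same values as cfg['variance']

loc = encode(truth, priors, variances)            # regression target
assert torch.allclose(decode(loc, priors, variances), truth, atol=1e-5)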
third_party/PIPNet/FaceBoxesV2/utils/build.py ADDED
@@ -0,0 +1,57 @@
# coding: utf-8

# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------

import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext


def find_in_path(name, path):
    "Find a file in a search path"
    # adapted from http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    for dir in path.split(os.pathsep):
        binpath = pjoin(dir, name)
        if os.path.exists(binpath):
            return os.path.abspath(binpath)
    return None


# Obtain the numpy include directory. This logic works across numpy versions.
try:
    numpy_include = np.get_include()
except AttributeError:
    numpy_include = np.get_numpy_include()


# run the customize_compiler
class custom_build_ext(build_ext):
    def build_extensions(self):
        # customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)


ext_modules = [
    Extension(
        "nms.cpu_nms",
        ["nms/cpu_nms.pyx"],
        # extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        extra_compile_args=["-Wno-cpp", "-Wno-unused-function"],
        include_dirs=[numpy_include]
    )
]

setup(
    name='mot_utils',
    ext_modules=ext_modules,
    # inject our custom trigger
    cmdclass={'build_ext': custom_build_ext},
)
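make.sh (three lines in this commit) presumably drives this script via distutils' standard build command, e.g. `python build.py build_ext --inplace`, which would produce the cpu_nms shared objects checked in under nms/.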
third_party/PIPNet/FaceBoxesV2/utils/config.py ADDED
@@ -0,0 +1,14 @@
# config.py

cfg = {
    'name': 'FaceBoxes',
    # 'min_dim': 1024,
    # 'feature_maps': [[32, 32], [16, 16], [8, 8]],
    # 'aspect_ratios': [[1], [1], [1]],
    'min_sizes': [[32, 64, 128], [256], [512]],
    'steps': [32, 64, 128],
    'variance': [0.1, 0.2],
    'clip': False,
    'loc_weight': 2.0,
    'gpu_train': True
}
third_party/PIPNet/FaceBoxesV2/utils/faceboxes.py ADDED
@@ -0,0 +1,239 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+
+ class BasicConv2d(nn.Module):
+
+     def __init__(self, in_channels, out_channels, **kwargs):
+         super(BasicConv2d, self).__init__()
+         self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
+         self.bn = nn.BatchNorm2d(out_channels, eps=1e-5)
+
+     def forward(self, x):
+         x = self.conv(x)
+         x = self.bn(x)
+         return F.relu(x, inplace=True)
+
+
+ class Inception(nn.Module):
+
+     def __init__(self):
+         super(Inception, self).__init__()
+         self.branch1x1 = BasicConv2d(128, 32, kernel_size=1, padding=0)
+         self.branch1x1_2 = BasicConv2d(128, 32, kernel_size=1, padding=0)
+         self.branch3x3_reduce = BasicConv2d(128, 24, kernel_size=1, padding=0)
+         self.branch3x3 = BasicConv2d(24, 32, kernel_size=3, padding=1)
+         self.branch3x3_reduce_2 = BasicConv2d(128, 24, kernel_size=1, padding=0)
+         self.branch3x3_2 = BasicConv2d(24, 32, kernel_size=3, padding=1)
+         self.branch3x3_3 = BasicConv2d(32, 32, kernel_size=3, padding=1)
+
+     def forward(self, x):
+         branch1x1 = self.branch1x1(x)
+
+         branch1x1_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
+         branch1x1_2 = self.branch1x1_2(branch1x1_pool)
+
+         branch3x3_reduce = self.branch3x3_reduce(x)
+         branch3x3 = self.branch3x3(branch3x3_reduce)
+
+         branch3x3_reduce_2 = self.branch3x3_reduce_2(x)
+         branch3x3_2 = self.branch3x3_2(branch3x3_reduce_2)
+         branch3x3_3 = self.branch3x3_3(branch3x3_2)
+
+         outputs = [branch1x1, branch1x1_2, branch3x3, branch3x3_3]
+         return torch.cat(outputs, 1)
+
+
+ class CRelu(nn.Module):
+
+     def __init__(self, in_channels, out_channels, **kwargs):
+         super(CRelu, self).__init__()
+         self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
+         self.bn = nn.BatchNorm2d(out_channels, eps=1e-5)
+
+     def forward(self, x):
+         x = self.conv(x)
+         x = self.bn(x)
+         x = torch.cat([x, -x], 1)
+         x = F.relu(x, inplace=True)
+         return x
+
+
+ class FaceBoxes(nn.Module):
+
+     def __init__(self, phase, size, num_classes):
+         super(FaceBoxes, self).__init__()
+         self.phase = phase
+         self.num_classes = num_classes
+         self.size = size
+
+         self.conv1 = CRelu(3, 24, kernel_size=7, stride=4, padding=3)
+         self.conv2 = CRelu(48, 64, kernel_size=5, stride=2, padding=2)
+
+         self.inception1 = Inception()
+         self.inception2 = Inception()
+         self.inception3 = Inception()
+
+         self.conv3_1 = BasicConv2d(128, 128, kernel_size=1, stride=1, padding=0)
+         self.conv3_2 = BasicConv2d(128, 256, kernel_size=3, stride=2, padding=1)
+
+         self.conv4_1 = BasicConv2d(256, 128, kernel_size=1, stride=1, padding=0)
+         self.conv4_2 = BasicConv2d(128, 256, kernel_size=3, stride=2, padding=1)
+
+         self.loc, self.conf = self.multibox(self.num_classes)
+
+         if self.phase == 'test':
+             self.softmax = nn.Softmax(dim=-1)
+
+         if self.phase == 'train':
+             for m in self.modules():
+                 if isinstance(m, nn.Conv2d):
+                     if m.bias is not None:
+                         nn.init.xavier_normal_(m.weight.data)
+                         m.bias.data.fill_(0.02)
+                     else:
+                         m.weight.data.normal_(0, 0.01)
+                 elif isinstance(m, nn.BatchNorm2d):
+                     m.weight.data.fill_(1)
+                     m.bias.data.zero_()
+
+     def multibox(self, num_classes):
+         loc_layers = []
+         conf_layers = []
+         loc_layers += [nn.Conv2d(128, 21 * 4, kernel_size=3, padding=1)]
+         conf_layers += [nn.Conv2d(128, 21 * num_classes, kernel_size=3, padding=1)]
+         loc_layers += [nn.Conv2d(256, 1 * 4, kernel_size=3, padding=1)]
+         conf_layers += [nn.Conv2d(256, 1 * num_classes, kernel_size=3, padding=1)]
+         loc_layers += [nn.Conv2d(256, 1 * 4, kernel_size=3, padding=1)]
+         conf_layers += [nn.Conv2d(256, 1 * num_classes, kernel_size=3, padding=1)]
+         return nn.Sequential(*loc_layers), nn.Sequential(*conf_layers)
+
+     def forward(self, x):
+
+         detection_sources = list()
+         loc = list()
+         conf = list()
+
+         x = self.conv1(x)
+         x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
+         x = self.conv2(x)
+         x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
+         x = self.inception1(x)
+         x = self.inception2(x)
+         x = self.inception3(x)
+         detection_sources.append(x)
+
+         x = self.conv3_1(x)
+         x = self.conv3_2(x)
+         detection_sources.append(x)
+
+         x = self.conv4_1(x)
+         x = self.conv4_2(x)
+         detection_sources.append(x)
+
+         for (x, l, c) in zip(detection_sources, self.loc, self.conf):
+             loc.append(l(x).permute(0, 2, 3, 1).contiguous())
+             conf.append(c(x).permute(0, 2, 3, 1).contiguous())
+
+         loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
+         conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
+
+         if self.phase == "test":
+             output = (loc.view(loc.size(0), -1, 4),
+                       self.softmax(conf.view(conf.size(0), -1, self.num_classes)))
+         else:
+             output = (loc.view(loc.size(0), -1, 4),
+                       conf.view(conf.size(0), -1, self.num_classes))
+
+         return output
+
+
+ class FaceBoxesV2(nn.Module):
+
+     def __init__(self, phase, size, num_classes):
+         super(FaceBoxesV2, self).__init__()
+         self.phase = phase
+         self.num_classes = num_classes
+         self.size = size
+
+         self.conv1 = BasicConv2d(3, 8, kernel_size=3, stride=2, padding=1)
+         self.conv2 = BasicConv2d(8, 16, kernel_size=3, stride=2, padding=1)
+         self.conv3 = BasicConv2d(16, 32, kernel_size=3, stride=2, padding=1)
+         self.conv4 = BasicConv2d(32, 64, kernel_size=3, stride=2, padding=1)
+         self.conv5 = BasicConv2d(64, 128, kernel_size=3, stride=2, padding=1)
+
+         self.inception1 = Inception()
+         self.inception2 = Inception()
+         self.inception3 = Inception()
+
+         self.conv6_1 = BasicConv2d(128, 128, kernel_size=1, stride=1, padding=0)
+         self.conv6_2 = BasicConv2d(128, 256, kernel_size=3, stride=2, padding=1)
+
+         self.conv7_1 = BasicConv2d(256, 128, kernel_size=1, stride=1, padding=0)
+         self.conv7_2 = BasicConv2d(128, 256, kernel_size=3, stride=2, padding=1)
+
+         self.loc, self.conf = self.multibox(self.num_classes)
+
+         if self.phase == 'test':
+             self.softmax = nn.Softmax(dim=-1)
+
+         if self.phase == 'train':
+             for m in self.modules():
+                 if isinstance(m, nn.Conv2d):
+                     if m.bias is not None:
+                         nn.init.xavier_normal_(m.weight.data)
+                         m.bias.data.fill_(0.02)
+                     else:
+                         m.weight.data.normal_(0, 0.01)
+                 elif isinstance(m, nn.BatchNorm2d):
+                     m.weight.data.fill_(1)
+                     m.bias.data.zero_()
+
+     def multibox(self, num_classes):
+         loc_layers = []
+         conf_layers = []
+         loc_layers += [nn.Conv2d(128, 21 * 4, kernel_size=3, padding=1)]
+         conf_layers += [nn.Conv2d(128, 21 * num_classes, kernel_size=3, padding=1)]
+         loc_layers += [nn.Conv2d(256, 1 * 4, kernel_size=3, padding=1)]
+         conf_layers += [nn.Conv2d(256, 1 * num_classes, kernel_size=3, padding=1)]
+         loc_layers += [nn.Conv2d(256, 1 * 4, kernel_size=3, padding=1)]
+         conf_layers += [nn.Conv2d(256, 1 * num_classes, kernel_size=3, padding=1)]
+         return nn.Sequential(*loc_layers), nn.Sequential(*conf_layers)
+
+     def forward(self, x):
+
+         sources = list()
+         loc = list()
+         conf = list()
+
+         x = self.conv1(x)
+         x = self.conv2(x)
+         x = self.conv3(x)
+         x = self.conv4(x)
+         x = self.conv5(x)
+         x = self.inception1(x)
+         x = self.inception2(x)
+         x = self.inception3(x)
+         sources.append(x)
+         x = self.conv6_1(x)
+         x = self.conv6_2(x)
+         sources.append(x)
+         x = self.conv7_1(x)
+         x = self.conv7_2(x)
+         sources.append(x)
+
+         for (x, l, c) in zip(sources, self.loc, self.conf):
+             loc.append(l(x).permute(0, 2, 3, 1).contiguous())
+             conf.append(c(x).permute(0, 2, 3, 1).contiguous())
+
+         loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
+         conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
+
+         if self.phase == "test":
+             output = (loc.view(loc.size(0), -1, 4),
+                       self.softmax(conf.view(-1, self.num_classes)))
+         else:
+             output = (loc.view(loc.size(0), -1, 4),
+                       conf.view(conf.size(0), -1, self.num_classes))
+
+         return output
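A quick shape check is a handy sanity test for the detectors above; this is a sketch (the import path is assumed), and note that in `phase='test'` `FaceBoxesV2` returns class scores flattened over the batch dimension:

```python
import torch
from third_party.PIPNet.FaceBoxesV2.utils.faceboxes import FaceBoxesV2  # path assumed

net = FaceBoxesV2(phase='test', size=None, num_classes=2).eval()
with torch.no_grad():
    loc, conf = net(torch.randn(1, 3, 256, 256))
# 256x256 input -> 8x8, 4x4, 2x2 source maps -> 8*8*21 + 4*4 + 2*2 = 1364 priors
print(loc.shape)   # torch.Size([1, 1364, 4])
print(conf.shape)  # torch.Size([1364, 2]); softmax is applied after view(-1, num_classes)
```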
third_party/PIPNet/FaceBoxesV2/utils/make.sh ADDED
@@ -0,0 +1,3 @@
+ #!/usr/bin/env bash
+ python3 build.py build_ext --inplace
+
third_party/PIPNet/FaceBoxesV2/utils/nms/__init__.py ADDED
File without changes
third_party/PIPNet/FaceBoxesV2/utils/nms/cpu_nms.c ADDED
The diff for this file is too large to render. See raw diff
 
third_party/PIPNet/FaceBoxesV2/utils/nms/cpu_nms.cpython-36m-x86_64-linux-gnu.so ADDED
Binary file (362 kB). View file
 
third_party/PIPNet/FaceBoxesV2/utils/nms/cpu_nms.cpython-38-x86_64-linux-gnu.so ADDED
Binary file (626 kB). View file
 
third_party/PIPNet/FaceBoxesV2/utils/nms/cpu_nms.pyx ADDED
@@ -0,0 +1,163 @@
+ # --------------------------------------------------------
+ # Fast R-CNN
+ # Copyright (c) 2015 Microsoft
+ # Licensed under The MIT License [see LICENSE for details]
+ # Written by Ross Girshick
+ # --------------------------------------------------------
+
+ import numpy as np
+ cimport numpy as np
+
+ cdef inline np.float32_t max(np.float32_t a, np.float32_t b):
+     return a if a >= b else b
+
+ cdef inline np.float32_t min(np.float32_t a, np.float32_t b):
+     return a if a <= b else b
+
+ def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh):
+     cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0]
+     cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1]
+     cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2]
+     cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3]
+     cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4]
+
+     cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+     cdef np.ndarray[np.int_t, ndim=1] order = scores.argsort()[::-1]
+
+     cdef int ndets = dets.shape[0]
+     cdef np.ndarray[np.int_t, ndim=1] suppressed = \
+         np.zeros((ndets), dtype=np.int)
+
+     # nominal indices
+     cdef int _i, _j
+     # sorted indices
+     cdef int i, j
+     # temp variables for box i's (the box currently under consideration)
+     cdef np.float32_t ix1, iy1, ix2, iy2, iarea
+     # variables for computing overlap with box j (lower scoring box)
+     cdef np.float32_t xx1, yy1, xx2, yy2
+     cdef np.float32_t w, h
+     cdef np.float32_t inter, ovr
+
+     keep = []
+     for _i in range(ndets):
+         i = order[_i]
+         if suppressed[i] == 1:
+             continue
+         keep.append(i)
+         ix1 = x1[i]
+         iy1 = y1[i]
+         ix2 = x2[i]
+         iy2 = y2[i]
+         iarea = areas[i]
+         for _j in range(_i + 1, ndets):
+             j = order[_j]
+             if suppressed[j] == 1:
+                 continue
+             xx1 = max(ix1, x1[j])
+             yy1 = max(iy1, y1[j])
+             xx2 = min(ix2, x2[j])
+             yy2 = min(iy2, y2[j])
+             w = max(0.0, xx2 - xx1 + 1)
+             h = max(0.0, yy2 - yy1 + 1)
+             inter = w * h
+             ovr = inter / (iarea + areas[j] - inter)
+             if ovr >= thresh:
+                 suppressed[j] = 1
+
+     return keep
+
+ def cpu_soft_nms(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0):
+     cdef unsigned int N = boxes.shape[0]
+     cdef float iw, ih, box_area
+     cdef float ua
+     cdef int pos = 0
+     cdef float maxscore = 0
+     cdef int maxpos = 0
+     cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov
+
+     for i in range(N):
+         maxscore = boxes[i, 4]
+         maxpos = i
+
+         tx1 = boxes[i,0]
+         ty1 = boxes[i,1]
+         tx2 = boxes[i,2]
+         ty2 = boxes[i,3]
+         ts = boxes[i,4]
+
+         pos = i + 1
+         # get max box
+         while pos < N:
+             if maxscore < boxes[pos, 4]:
+                 maxscore = boxes[pos, 4]
+                 maxpos = pos
+             pos = pos + 1
+
+         # add max box as a detection
+         boxes[i,0] = boxes[maxpos,0]
+         boxes[i,1] = boxes[maxpos,1]
+         boxes[i,2] = boxes[maxpos,2]
+         boxes[i,3] = boxes[maxpos,3]
+         boxes[i,4] = boxes[maxpos,4]
+
+         # swap ith box with position of max box
+         boxes[maxpos,0] = tx1
+         boxes[maxpos,1] = ty1
+         boxes[maxpos,2] = tx2
+         boxes[maxpos,3] = ty2
+         boxes[maxpos,4] = ts
+
+         tx1 = boxes[i,0]
+         ty1 = boxes[i,1]
+         tx2 = boxes[i,2]
+         ty2 = boxes[i,3]
+         ts = boxes[i,4]
+
+         pos = i + 1
+         # NMS iterations, note that N changes if detection boxes fall below threshold
+         while pos < N:
+             x1 = boxes[pos, 0]
+             y1 = boxes[pos, 1]
+             x2 = boxes[pos, 2]
+             y2 = boxes[pos, 3]
+             s = boxes[pos, 4]
+
+             area = (x2 - x1 + 1) * (y2 - y1 + 1)
+             iw = (min(tx2, x2) - max(tx1, x1) + 1)
+             if iw > 0:
+                 ih = (min(ty2, y2) - max(ty1, y1) + 1)
+                 if ih > 0:
+                     ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
+                     ov = iw * ih / ua  # iou between max box and detection box
+
+                     if method == 1:  # linear
+                         if ov > Nt:
+                             weight = 1 - ov
+                         else:
+                             weight = 1
+                     elif method == 2:  # gaussian
+                         weight = np.exp(-(ov * ov)/sigma)
+                     else:  # original NMS
+                         if ov > Nt:
+                             weight = 0
+                         else:
+                             weight = 1
+
+                     boxes[pos, 4] = weight*boxes[pos, 4]
+
+                     # if box score falls below threshold, discard the box by swapping with last box
+                     # update N
+                     if boxes[pos, 4] < threshold:
+                         boxes[pos,0] = boxes[N-1, 0]
+                         boxes[pos,1] = boxes[N-1, 1]
+                         boxes[pos,2] = boxes[N-1, 2]
+                         boxes[pos,3] = boxes[N-1, 3]
+                         boxes[pos,4] = boxes[N-1, 4]
+                         N = N - 1
+                         pos = pos - 1
+
+             pos = pos + 1
+
+     keep = [i for i in range(N)]
+     return keep
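Once the extension is compiled (see `make.sh` above), both routines take a float32 array of `[x1, y1, x2, y2, score]` rows; a sketch, with the in-place-build import path assumed:

```python
import numpy as np
from nms.cpu_nms import cpu_nms, cpu_soft_nms  # import path assumed after in-place build

dets = np.array([[10, 10, 50, 50, 0.9],
                 [12, 12, 52, 52, 0.8]], dtype=np.float32)
keep = cpu_nms(dets, 0.5)                         # hard NMS: indices of kept boxes
keep_soft = cpu_soft_nms(dets, Nt=0.3, method=2)  # Gaussian soft-NMS; rescores dets in place
```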
third_party/PIPNet/FaceBoxesV2/utils/nms/gpu_nms.hpp ADDED
@@ -0,0 +1,2 @@
+ void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
+           int boxes_dim, float nms_overlap_thresh, int device_id);
third_party/PIPNet/FaceBoxesV2/utils/nms/gpu_nms.pyx ADDED
@@ -0,0 +1,31 @@
+ # --------------------------------------------------------
+ # Faster R-CNN
+ # Copyright (c) 2015 Microsoft
+ # Licensed under The MIT License [see LICENSE for details]
+ # Written by Ross Girshick
+ # --------------------------------------------------------
+
+ import numpy as np
+ cimport numpy as np
+
+ assert sizeof(int) == sizeof(np.int32_t)
+
+ cdef extern from "gpu_nms.hpp":
+     void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)
+
+ def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh,
+             np.int32_t device_id=0):
+     cdef int boxes_num = dets.shape[0]
+     cdef int boxes_dim = dets.shape[1]
+     cdef int num_out
+     cdef np.ndarray[np.int32_t, ndim=1] \
+         keep = np.zeros(boxes_num, dtype=np.int32)
+     cdef np.ndarray[np.float32_t, ndim=1] \
+         scores = dets[:, 4]
+     cdef np.ndarray[np.int_t, ndim=1] \
+         order = scores.argsort()[::-1]
+     cdef np.ndarray[np.float32_t, ndim=2] \
+         sorted_dets = dets[order, :]
+     _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id)
+     keep = keep[:num_out]
+     return list(order[keep])
third_party/PIPNet/FaceBoxesV2/utils/nms/nms_kernel.cu ADDED
@@ -0,0 +1,144 @@
+ // ------------------------------------------------------------------
+ // Faster R-CNN
+ // Copyright (c) 2015 Microsoft
+ // Licensed under The MIT License [see fast-rcnn/LICENSE for details]
+ // Written by Shaoqing Ren
+ // ------------------------------------------------------------------
+
+ #include "gpu_nms.hpp"
+ #include <vector>
+ #include <iostream>
+
+ #define CUDA_CHECK(condition) \
+   /* Code block avoids redefinition of cudaError_t error */ \
+   do { \
+     cudaError_t error = condition; \
+     if (error != cudaSuccess) { \
+       std::cout << cudaGetErrorString(error) << std::endl; \
+     } \
+   } while (0)
+
+ #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
+ int const threadsPerBlock = sizeof(unsigned long long) * 8;
+
+ __device__ inline float devIoU(float const * const a, float const * const b) {
+   float left = max(a[0], b[0]), right = min(a[2], b[2]);
+   float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
+   float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
+   float interS = width * height;
+   float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
+   float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
+   return interS / (Sa + Sb - interS);
+ }
+
+ __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
+                            const float *dev_boxes, unsigned long long *dev_mask) {
+   const int row_start = blockIdx.y;
+   const int col_start = blockIdx.x;
+
+   // if (row_start > col_start) return;
+
+   const int row_size =
+         min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
+   const int col_size =
+         min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
+
+   __shared__ float block_boxes[threadsPerBlock * 5];
+   if (threadIdx.x < col_size) {
+     block_boxes[threadIdx.x * 5 + 0] =
+         dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
+     block_boxes[threadIdx.x * 5 + 1] =
+         dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
+     block_boxes[threadIdx.x * 5 + 2] =
+         dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
+     block_boxes[threadIdx.x * 5 + 3] =
+         dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
+     block_boxes[threadIdx.x * 5 + 4] =
+         dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
+   }
+   __syncthreads();
+
+   if (threadIdx.x < row_size) {
+     const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
+     const float *cur_box = dev_boxes + cur_box_idx * 5;
+     int i = 0;
+     unsigned long long t = 0;
+     int start = 0;
+     if (row_start == col_start) {
+       start = threadIdx.x + 1;
+     }
+     for (i = start; i < col_size; i++) {
+       if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
+         t |= 1ULL << i;
+       }
+     }
+     const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
+     dev_mask[cur_box_idx * col_blocks + col_start] = t;
+   }
+ }
+
+ void _set_device(int device_id) {
+   int current_device;
+   CUDA_CHECK(cudaGetDevice(&current_device));
+   if (current_device == device_id) {
+     return;
+   }
+   // The call to cudaSetDevice must come before any calls to Get, which
+   // may perform initialization using the GPU.
+   CUDA_CHECK(cudaSetDevice(device_id));
+ }
+
+ void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
+           int boxes_dim, float nms_overlap_thresh, int device_id) {
+   _set_device(device_id);
+
+   float* boxes_dev = NULL;
+   unsigned long long* mask_dev = NULL;
+
+   const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
+
+   CUDA_CHECK(cudaMalloc(&boxes_dev,
+                         boxes_num * boxes_dim * sizeof(float)));
+   CUDA_CHECK(cudaMemcpy(boxes_dev,
+                         boxes_host,
+                         boxes_num * boxes_dim * sizeof(float),
+                         cudaMemcpyHostToDevice));
+
+   CUDA_CHECK(cudaMalloc(&mask_dev,
+                         boxes_num * col_blocks * sizeof(unsigned long long)));
+
+   dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
+               DIVUP(boxes_num, threadsPerBlock));
+   dim3 threads(threadsPerBlock);
+   nms_kernel<<<blocks, threads>>>(boxes_num,
+                                   nms_overlap_thresh,
+                                   boxes_dev,
+                                   mask_dev);
+
+   std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
+   CUDA_CHECK(cudaMemcpy(&mask_host[0],
+                         mask_dev,
+                         sizeof(unsigned long long) * boxes_num * col_blocks,
+                         cudaMemcpyDeviceToHost));
+
+   std::vector<unsigned long long> remv(col_blocks);
+   memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
+
+   int num_to_keep = 0;
+   for (int i = 0; i < boxes_num; i++) {
+     int nblock = i / threadsPerBlock;
+     int inblock = i % threadsPerBlock;
+
+     if (!(remv[nblock] & (1ULL << inblock))) {
+       keep_out[num_to_keep++] = i;
+       unsigned long long *p = &mask_host[0] + i * col_blocks;
+       for (int j = nblock; j < col_blocks; j++) {
+         remv[j] |= p[j];
+       }
+     }
+   }
+   *num_out = num_to_keep;
+
+   CUDA_CHECK(cudaFree(boxes_dev));
+   CUDA_CHECK(cudaFree(mask_dev));
+ }
third_party/PIPNet/FaceBoxesV2/utils/nms/py_cpu_nms.py ADDED
@@ -0,0 +1,38 @@
+ # --------------------------------------------------------
+ # Fast R-CNN
+ # Copyright (c) 2015 Microsoft
+ # Licensed under The MIT License [see LICENSE for details]
+ # Written by Ross Girshick
+ # --------------------------------------------------------
+
+ import numpy as np
+
+ def py_cpu_nms(dets, thresh):
+     """Pure Python NMS baseline."""
+     x1 = dets[:, 0]
+     y1 = dets[:, 1]
+     x2 = dets[:, 2]
+     y2 = dets[:, 3]
+     scores = dets[:, 4]
+
+     areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+     order = scores.argsort()[::-1]
+
+     keep = []
+     while order.size > 0:
+         i = order[0]
+         keep.append(i)
+         xx1 = np.maximum(x1[i], x1[order[1:]])
+         yy1 = np.maximum(y1[i], y1[order[1:]])
+         xx2 = np.minimum(x2[i], x2[order[1:]])
+         yy2 = np.minimum(y2[i], y2[order[1:]])
+
+         w = np.maximum(0.0, xx2 - xx1 + 1)
+         h = np.maximum(0.0, yy2 - yy1 + 1)
+         inter = w * h
+         ovr = inter / (areas[i] + areas[order[1:]] - inter)
+
+         inds = np.where(ovr <= thresh)[0]
+         order = order[inds + 1]
+
+     return keep
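The pure-Python baseline is convenient for sanity checks because it needs no compiled extension; a quick self-contained example (import path assumed):

```python
import numpy as np
from third_party.PIPNet.FaceBoxesV2.utils.nms.py_cpu_nms import py_cpu_nms  # path assumed

dets = np.array([[10, 10, 50, 50, 0.9],
                 [12, 12, 52, 52, 0.8],        # IoU ~0.83 with box 0 -> suppressed
                 [100, 100, 150, 150, 0.7]], dtype=np.float32)
print(py_cpu_nms(dets, thresh=0.5))  # -> [0, 2]
```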
third_party/PIPNet/FaceBoxesV2/utils/nms_wrapper.py ADDED
@@ -0,0 +1,15 @@
+ # --------------------------------------------------------
+ # Fast R-CNN
+ # Copyright (c) 2015 Microsoft
+ # Licensed under The MIT License [see LICENSE for details]
+ # Written by Ross Girshick
+ # --------------------------------------------------------
+
+ from .nms.cpu_nms import cpu_nms, cpu_soft_nms
+
+ def nms(dets, thresh):
+     """Dispatch to either CPU or GPU NMS implementations."""
+
+     if dets.shape[0] == 0:
+         return []
+     return cpu_nms(dets, thresh)
third_party/PIPNet/FaceBoxesV2/utils/prior_box.py ADDED
@@ -0,0 +1,43 @@
+ import torch
+ from itertools import product as product
+ import numpy as np
+ from math import ceil
+
+
+ class PriorBox(object):
+     def __init__(self, cfg, image_size=None, phase='train'):
+         super(PriorBox, self).__init__()
+         #self.aspect_ratios = cfg['aspect_ratios']
+         self.min_sizes = cfg['min_sizes']
+         self.steps = cfg['steps']
+         self.clip = cfg['clip']
+         self.image_size = image_size
+         self.feature_maps = [[ceil(self.image_size[0]/step), ceil(self.image_size[1]/step)] for step in self.steps]
+
+     def forward(self):
+         anchors = []
+         for k, f in enumerate(self.feature_maps):
+             min_sizes = self.min_sizes[k]
+             for i, j in product(range(f[0]), range(f[1])):
+                 for min_size in min_sizes:
+                     s_kx = min_size / self.image_size[1]
+                     s_ky = min_size / self.image_size[0]
+                     if min_size == 32:
+                         dense_cx = [x*self.steps[k]/self.image_size[1] for x in [j+0, j+0.25, j+0.5, j+0.75]]
+                         dense_cy = [y*self.steps[k]/self.image_size[0] for y in [i+0, i+0.25, i+0.5, i+0.75]]
+                         for cy, cx in product(dense_cy, dense_cx):
+                             anchors += [cx, cy, s_kx, s_ky]
+                     elif min_size == 64:
+                         dense_cx = [x*self.steps[k]/self.image_size[1] for x in [j+0, j+0.5]]
+                         dense_cy = [y*self.steps[k]/self.image_size[0] for y in [i+0, i+0.5]]
+                         for cy, cx in product(dense_cy, dense_cx):
+                             anchors += [cx, cy, s_kx, s_ky]
+                     else:
+                         cx = (j + 0.5) * self.steps[k] / self.image_size[1]
+                         cy = (i + 0.5) * self.steps[k] / self.image_size[0]
+                         anchors += [cx, cy, s_kx, s_ky]
+         # back to torch land
+         output = torch.Tensor(anchors).view(-1, 4)
+         if self.clip:
+             output.clamp_(max=1, min=0)
+         return output
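A sketch of generating the anchors with the `cfg` from `config.py` above (import paths assumed):

```python
from third_party.PIPNet.FaceBoxesV2.utils.config import cfg          # path assumed
from third_party.PIPNet.FaceBoxesV2.utils.prior_box import PriorBox  # path assumed

priors = PriorBox(cfg, image_size=(256, 256)).forward()
# per-cell anchor counts: 21 on the stride-32 map (dense 32/64 anchors), 1 elsewhere
print(priors.shape)  # torch.Size([1364, 4]); rows are [cx, cy, w, h] normalized to [0, 1]
```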
third_party/PIPNet/FaceBoxesV2/utils/timer.py ADDED
@@ -0,0 +1,40 @@
+ # --------------------------------------------------------
+ # Fast R-CNN
+ # Copyright (c) 2015 Microsoft
+ # Licensed under The MIT License [see LICENSE for details]
+ # Written by Ross Girshick
+ # --------------------------------------------------------
+
+ import time
+
+
+ class Timer(object):
+     """A simple timer."""
+     def __init__(self):
+         self.total_time = 0.
+         self.calls = 0
+         self.start_time = 0.
+         self.diff = 0.
+         self.average_time = 0.
+
+     def tic(self):
+         # using time.time instead of time.clock because time.clock
+         # does not normalize for multithreading
+         self.start_time = time.time()
+
+     def toc(self, average=True):
+         self.diff = time.time() - self.start_time
+         self.total_time += self.diff
+         self.calls += 1
+         self.average_time = self.total_time / self.calls
+         if average:
+             return self.average_time
+         else:
+             return self.diff
+
+     def clear(self):
+         self.total_time = 0.
+         self.calls = 0
+         self.start_time = 0.
+         self.diff = 0.
+         self.average_time = 0.
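Typical use is to bracket the timed region with `tic()`/`toc()`; a minimal sketch (`detect` is a hypothetical workload):

```python
timer = Timer()
for _ in range(10):
    timer.tic()
    detect()                         # hypothetical workload being profiled
    print(timer.toc(average=False))  # elapsed seconds for this call
print(timer.average_time)            # running average across all calls
```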
third_party/PIPNet/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2020 Haibo Jin
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
third_party/PIPNet/README.md ADDED
@@ -0,0 +1,153 @@
+ # Pixel-in-Pixel Net: Towards Efficient Facial Landmark Detection in the Wild
+ ## Introduction
+ This is the code of paper [Pixel-in-Pixel Net: Towards Efficient Facial Landmark Detection in the Wild](https://arxiv.org/abs/2003.03771). We propose a novel facial landmark detector, PIPNet, that is **fast**, **accurate**, and **robust**. PIPNet can be trained under two settings: (1) supervised learning; (2) generalizable semi-supervised learning (GSSL). With GSSL, PIPNet has better cross-domain generalization performance by utilizing massive amounts of unlabeled data across domains.
+
+ <img src="images/speed.png" alt="speed" width="640px">
+ Figure 1. Comparison to existing methods on speed-accuracy tradeoff, tested on WFLW full test set (closer to bottom-right corner is better).<br><br>
+
+ <img src="images/detection_heads.png" alt="det_heads" width="512px">
+ Figure 2. Comparison of different detection heads.<br>
+
+ ## Installation
+ 1. Install Python3 and PyTorch >= v1.1
+ 2. Clone this repository.
+ ```Shell
+ git clone https://github.com/jhb86253817/PIPNet.git
+ ```
+ 3. Install the dependencies in requirements.txt.
+ ```Shell
+ pip install -r requirements.txt
+ ```
+
+ ## Demo
+ 1. We use a [modified version](https://github.com/jhb86253817/FaceBoxesV2) of [FaceBoxes](https://github.com/zisianw/FaceBoxes.PyTorch) as the face detector, so go to folder `FaceBoxesV2/utils`, run `sh make.sh` to build for NMS.
+ 2. Back to folder `PIPNet`, create two empty folders `logs` and `snapshots`. For PIPNets, you can download our trained models from [here](https://drive.google.com/drive/folders/17OwDgJUfuc5_ymQ3QruD8pUnh5zHreP2?usp=sharing), and put them under folder `snapshots/DATA_NAME/EXPERIMENT_NAME/`.
+ 3. Edit `run_demo.sh` to choose the config file and input source you want, then run `sh run_demo.sh`. We support image, video, and camera as the input. Some sample predictions can be seen as follows.
+ * PIPNet-ResNet18 trained on WFLW, with image `images/1.jpg` as the input:
+ <img src="images/1_out_WFLW_model.jpg" alt="1_out_WFLW_model" width="400px">
+
+ * PIPNet-ResNet18 trained on WFLW, with a snippet from *Shaolin Soccer* as the input:
+ <img src="videos/shaolin_soccer.gif" alt="shaolin_soccer" width="400px">
+
+ * PIPNet-ResNet18 trained on WFLW, with video `videos/002.avi` as the input:
+ <img src="videos/002_out_WFLW_model.gif" alt="002_out_WFLW_model" width="512px">
+
+ * PIPNet-ResNet18 trained on 300W+CelebA (GSSL), with video `videos/007.avi` as the input:
+ <img src="videos/007_out_300W_CELEBA_model.gif" alt="007_out_300W_CELEBA_model" width="512px">
+
+ ## Training
+
+ ### Supervised Learning
+ Datasets: [300W](https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/), [COFW](http://www.vision.caltech.edu/xpburgos/ICCV13/), [WFLW](https://wywu.github.io/projects/LAB/WFLW.html), [AFLW](https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/)
+
+ 1. Download the datasets from official sources, then put them under folder `data`. The folder structure should look like this:
+ ````
+ PIPNet
+ -- FaceBoxesV2
+ -- lib
+ -- experiments
+ -- logs
+ -- snapshots
+ -- data
+    |-- data_300W
+        |-- afw
+        |-- helen
+        |-- ibug
+        |-- lfpw
+    |-- COFW
+        |-- COFW_train_color.mat
+        |-- COFW_test_color.mat
+    |-- WFLW
+        |-- WFLW_images
+        |-- WFLW_annotations
+    |-- AFLW
+        |-- flickr
+        |-- AFLWinfo_release.mat
+ ````
+ 2. Go to folder `lib`, preprocess a dataset by running ```python preprocess.py DATA_NAME```. For example, to process 300W:
+ ```
+ python preprocess.py data_300W
+ ```
+ 3. Back to folder `PIPNet`, edit `run_train.sh` to choose the config file you want. Then, train the model by running:
+ ```
+ sh run_train.sh
+ ```
+
+ ### Generalizable Semi-supervised Learning
+ Datasets:
+ * data_300W_COFW_WFLW: 300W + COFW-68 (unlabeled) + WFLW-68 (unlabeled)
+ * data_300W_CELEBA: 300W + CelebA (unlabeled)
+
+ 1. Download 300W, COFW, and WFLW as in the supervised learning setting. Download annotations of COFW-68 test from [here](https://github.com/golnazghiasi/cofw68-benchmark). For 300W+CelebA, you also need to download the in-the-wild CelebA images from [here](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html), and the [face bounding boxes](https://drive.google.com/drive/folders/17OwDgJUfuc5_ymQ3QruD8pUnh5zHreP2?usp=sharing) detected by us. The folder structure should look like this:
+ ````
+ PIPNet
+ -- FaceBoxesV2
+ -- lib
+ -- experiments
+ -- logs
+ -- snapshots
+ -- data
+    |-- data_300W
+        |-- afw
+        |-- helen
+        |-- ibug
+        |-- lfpw
+    |-- COFW
+        |-- COFW_train_color.mat
+        |-- COFW_test_color.mat
+    |-- WFLW
+        |-- WFLW_images
+        |-- WFLW_annotations
+    |-- data_300W_COFW_WFLW
+        |-- cofw68_test_annotations
+        |-- cofw68_test_bboxes.mat
+    |-- CELEBA
+        |-- img_celeba
+        |-- celeba_bboxes.txt
+    |-- data_300W_CELEBA
+        |-- cofw68_test_annotations
+        |-- cofw68_test_bboxes.mat
+ ````
+ 2. Go to folder `lib`, preprocess a dataset by running ```python preprocess_gssl.py DATA_NAME```.
+    To process data_300W_COFW_WFLW, run
+ ```
+ python preprocess_gssl.py data_300W_COFW_WFLW
+ ```
+    To process data_300W_CELEBA, run
+ ```
+ python preprocess_gssl.py CELEBA
+ ```
+    and
+ ```
+ python preprocess_gssl.py data_300W_CELEBA
+ ```
+ 3. Back to folder `PIPNet`, edit `run_train.sh` to choose the config file you want. Then, train the model by running:
+ ```
+ sh run_train.sh
+ ```
+
+ ## Evaluation
+ 1. Edit `run_test.sh` to choose the config file you want. Then, test the model by running:
+ ```
+ sh run_test.sh
+ ```
+
+ ## Citation
+ ````
+ @article{JLS21,
+   title={Pixel-in-Pixel Net: Towards Efficient Facial Landmark Detection in the Wild},
+   author={Haibo Jin and Shengcai Liao and Ling Shao},
+   journal={International Journal of Computer Vision},
+   publisher={Springer Science and Business Media LLC},
+   ISSN={1573-1405},
+   url={http://dx.doi.org/10.1007/s11263-021-01521-4},
+   DOI={10.1007/s11263-021-01521-4},
+   year={2021},
+   month={Sep}
+ }
+ ````
+
+ ## Acknowledgement
+ We thank the following great works:
+ * [human-pose-estimation.pytorch](https://github.com/microsoft/human-pose-estimation.pytorch)
+ * [HRNet-Facial-Landmark-Detection](https://github.com/HRNet/HRNet-Facial-Landmark-Detection)
third_party/PIPNet/lib/data_utils.py ADDED
@@ -0,0 +1,166 @@
+ import torch.utils.data as data
+ import torch
+ from PIL import Image, ImageFilter
+ import os, cv2
+ import numpy as np
+ import random
+ from scipy.stats import norm
+ from math import floor
+
+ def random_translate(image, target):
+     if random.random() > 0.5:
+         image_height, image_width = image.size
+         a = 1
+         b = 0
+         #c = 30 #left/right (i.e. 5/-5)
+         c = int((random.random()-0.5) * 60)
+         d = 0
+         e = 1
+         #f = 30 #up/down (i.e. 5/-5)
+         f = int((random.random()-0.5) * 60)
+         image = image.transform(image.size, Image.AFFINE, (a, b, c, d, e, f))
+         target_translate = target.copy()
+         target_translate = target_translate.reshape(-1, 2)
+         target_translate[:, 0] -= 1.*c/image_width
+         target_translate[:, 1] -= 1.*f/image_height
+         target_translate = target_translate.flatten()
+         target_translate[target_translate < 0] = 0
+         target_translate[target_translate > 1] = 1
+         return image, target_translate
+     else:
+         return image, target
+
+ def random_blur(image):
+     if random.random() > 0.7:
+         image = image.filter(ImageFilter.GaussianBlur(random.random()*5))
+     return image
+
+ def random_occlusion(image):
+     if random.random() > 0.5:
+         image_np = np.array(image).astype(np.uint8)
+         image_np = image_np[:,:,::-1]
+         image_height, image_width, _ = image_np.shape
+         occ_height = int(image_height*0.4*random.random())
+         occ_width = int(image_width*0.4*random.random())
+         occ_xmin = int((image_width - occ_width - 10) * random.random())
+         occ_ymin = int((image_height - occ_height - 10) * random.random())
+         image_np[occ_ymin:occ_ymin+occ_height, occ_xmin:occ_xmin+occ_width, 0] = int(random.random() * 255)
+         image_np[occ_ymin:occ_ymin+occ_height, occ_xmin:occ_xmin+occ_width, 1] = int(random.random() * 255)
+         image_np[occ_ymin:occ_ymin+occ_height, occ_xmin:occ_xmin+occ_width, 2] = int(random.random() * 255)
+         image_pil = Image.fromarray(image_np[:,:,::-1].astype('uint8'), 'RGB')
+         return image_pil
+     else:
+         return image
+
+ def random_flip(image, target, points_flip):
+     if random.random() > 0.5:
+         image = image.transpose(Image.FLIP_LEFT_RIGHT)
+         target = np.array(target).reshape(-1, 2)
+         target = target[points_flip, :]
+         target[:,0] = 1-target[:,0]
+         target = target.flatten()
+         return image, target
+     else:
+         return image, target
+
+ def random_rotate(image, target, angle_max):
+     if random.random() > 0.5:
+         center_x = 0.5
+         center_y = 0.5
+         landmark_num = int(len(target) / 2)
+         target_center = np.array(target) - np.array([center_x, center_y]*landmark_num)
+         target_center = target_center.reshape(landmark_num, 2)
+         theta_max = np.radians(angle_max)
+         theta = random.uniform(-theta_max, theta_max)
+         angle = np.degrees(theta)
+         image = image.rotate(angle)
+
+         c, s = np.cos(theta), np.sin(theta)
+         rot = np.array(((c,-s), (s, c)))
+         target_center_rot = np.matmul(target_center, rot)
+         target_rot = target_center_rot.reshape(landmark_num*2) + np.array([center_x, center_y]*landmark_num)
+         return image, target_rot
+     else:
+         return image, target
+
+ def gen_target_pip(target, meanface_indices, target_map, target_local_x, target_local_y, target_nb_x, target_nb_y):
+     num_nb = len(meanface_indices[0])
+     map_channel, map_height, map_width = target_map.shape
+     target = target.reshape(-1, 2)
+     assert map_channel == target.shape[0]
+
+     for i in range(map_channel):
+         mu_x = int(floor(target[i][0] * map_width))
+         mu_y = int(floor(target[i][1] * map_height))
+         mu_x = max(0, mu_x)
+         mu_y = max(0, mu_y)
+         mu_x = min(mu_x, map_width-1)
+         mu_y = min(mu_y, map_height-1)
+         target_map[i, mu_y, mu_x] = 1
+         shift_x = target[i][0] * map_width - mu_x
+         shift_y = target[i][1] * map_height - mu_y
+         target_local_x[i, mu_y, mu_x] = shift_x
+         target_local_y[i, mu_y, mu_x] = shift_y
+
+         for j in range(num_nb):
+             nb_x = target[meanface_indices[i][j]][0] * map_width - mu_x
+             nb_y = target[meanface_indices[i][j]][1] * map_height - mu_y
+             target_nb_x[num_nb*i+j, mu_y, mu_x] = nb_x
+             target_nb_y[num_nb*i+j, mu_y, mu_x] = nb_y
+
+     return target_map, target_local_x, target_local_y, target_nb_x, target_nb_y
+
+ class ImageFolder_pip(data.Dataset):
+     def __init__(self, root, imgs, input_size, num_lms, net_stride, points_flip, meanface_indices, transform=None, target_transform=None):
+         self.root = root
+         self.imgs = imgs
+         self.num_lms = num_lms
+         self.net_stride = net_stride
+         self.points_flip = points_flip
+         self.meanface_indices = meanface_indices
+         self.num_nb = len(meanface_indices[0])
+         self.transform = transform
+         self.target_transform = target_transform
+         self.input_size = input_size
+
+     def __getitem__(self, index):
+
+         img_name, target = self.imgs[index]
+
+         img = Image.open(os.path.join(self.root, img_name)).convert('RGB')
+         img, target = random_translate(img, target)
+         img = random_occlusion(img)
+         img, target = random_flip(img, target, self.points_flip)
+         img, target = random_rotate(img, target, 30)
+         img = random_blur(img)
+
+         target_map = np.zeros((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+         target_local_x = np.zeros((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+         target_local_y = np.zeros((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+         target_nb_x = np.zeros((self.num_nb*self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+         target_nb_y = np.zeros((self.num_nb*self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+         target_map, target_local_x, target_local_y, target_nb_x, target_nb_y = gen_target_pip(target, self.meanface_indices, target_map, target_local_x, target_local_y, target_nb_x, target_nb_y)
+
+         target_map = torch.from_numpy(target_map).float()
+         target_local_x = torch.from_numpy(target_local_x).float()
+         target_local_y = torch.from_numpy(target_local_y).float()
+         target_nb_x = torch.from_numpy(target_nb_x).float()
+         target_nb_y = torch.from_numpy(target_nb_y).float()
+
+         if self.transform is not None:
+             img = self.transform(img)
+         if self.target_transform is not None:
+             target_map = self.target_transform(target_map)
+             target_local_x = self.target_transform(target_local_x)
+             target_local_y = self.target_transform(target_local_y)
+             target_nb_x = self.target_transform(target_nb_x)
+             target_nb_y = self.target_transform(target_nb_y)
+
+         return img, target_map, target_local_x, target_local_y, target_nb_x, target_nb_y
+
+     def __len__(self):
+         return len(self.imgs)
+
+ if __name__ == '__main__':
+     pass
+
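A small self-contained sketch of the augmentations above, assuming `random_rotate` from `data_utils.py` is in scope: landmarks are stored flattened and normalized to [0, 1], and they turn with the image about its center (the transform is a no-op about half the time by design):

```python
import numpy as np
from PIL import Image

img = Image.new('RGB', (256, 256))                       # stand-in image
lmk = np.array([0.3, 0.4, 0.7, 0.4], dtype=np.float32)   # two landmarks, flattened
img_rot, lmk_rot = random_rotate(img, lmk, angle_max=30)
print(lmk_rot.reshape(-1, 2))                            # still normalized coordinates
```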
third_party/PIPNet/lib/data_utils_gssl.py ADDED
@@ -0,0 +1,290 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch.utils.data as data
2
+ import torch
3
+ from PIL import Image, ImageFilter
4
+ import os, cv2
5
+ import numpy as np
6
+ import random
7
+ from scipy.stats import norm
8
+ from math import floor
9
+
10
+ def random_translate(image, target):
11
+ if random.random() > 0.5:
12
+ image_height, image_width = image.size
13
+ a = 1
14
+ b = 0
15
+ #c = 30 #left/right (i.e. 5/-5)
16
+ c = int((random.random()-0.5) * 60)
17
+ d = 0
18
+ e = 1
19
+ #f = 30 #up/down (i.e. 5/-5)
20
+ f = int((random.random()-0.5) * 60)
21
+ image = image.transform(image.size, Image.AFFINE, (a, b, c, d, e, f))
22
+ target_translate = target.copy()
23
+ target_translate = target_translate.reshape(-1, 2)
24
+ target_translate[:, 0] -= 1.*c/image_width
25
+ target_translate[:, 1] -= 1.*f/image_height
26
+ target_translate = target_translate.flatten()
27
+ target_translate[target_translate < 0] = 0
28
+ target_translate[target_translate > 1] = 1
29
+ return image, target_translate
30
+ else:
31
+ return image, target
32
+
33
+ def random_blur(image):
34
+ if random.random() > 0.7:
35
+ image = image.filter(ImageFilter.GaussianBlur(random.random()*5))
36
+ return image
37
+
38
+ def random_occlusion(image):
39
+ if random.random() > 0.5:
40
+ image_np = np.array(image).astype(np.uint8)
41
+ image_np = image_np[:,:,::-1]
42
+ image_height, image_width, _ = image_np.shape
43
+ occ_height = int(image_height*0.4*random.random())
44
+ occ_width = int(image_width*0.4*random.random())
45
+ occ_xmin = int((image_width - occ_width - 10) * random.random())
46
+ occ_ymin = int((image_height - occ_height - 10) * random.random())
47
+ image_np[occ_ymin:occ_ymin+occ_height, occ_xmin:occ_xmin+occ_width, 0] = int(random.random() * 255)
48
+ image_np[occ_ymin:occ_ymin+occ_height, occ_xmin:occ_xmin+occ_width, 1] = int(random.random() * 255)
49
+ image_np[occ_ymin:occ_ymin+occ_height, occ_xmin:occ_xmin+occ_width, 2] = int(random.random() * 255)
50
+ image_pil = Image.fromarray(image_np[:,:,::-1].astype('uint8'), 'RGB')
51
+ return image_pil
52
+ else:
53
+ return image
54
+
55
+ def random_flip(image, target, points_flip):
56
+ if random.random() > 0.5:
57
+ image = image.transpose(Image.FLIP_LEFT_RIGHT)
58
+ target = np.array(target).reshape(-1, 2)
59
+ target = target[points_flip, :]
60
+ target[:,0] = 1-target[:,0]
61
+ target = target.flatten()
62
+ return image, target
63
+ else:
64
+ return image, target
65
+
66
+ def random_rotate(image, target, angle_max):
67
+ if random.random() > 0.5:
68
+ center_x = 0.5
69
+ center_y = 0.5
70
+ landmark_num= int(len(target) / 2)
71
+ target_center = np.array(target) - np.array([center_x, center_y]*landmark_num)
72
+ target_center = target_center.reshape(landmark_num, 2)
73
+ theta_max = np.radians(angle_max)
74
+ theta = random.uniform(-theta_max, theta_max)
75
+ angle = np.degrees(theta)
76
+ image = image.rotate(angle)
77
+
78
+ c, s = np.cos(theta), np.sin(theta)
79
+ rot = np.array(((c,-s), (s, c)))
80
+ target_center_rot = np.matmul(target_center, rot)
81
+ target_rot = target_center_rot.reshape(landmark_num*2) + np.array([center_x, center_y]*landmark_num)
82
+ return image, target_rot
83
+ else:
84
+ return image, target
85
+
86
+ def gen_target_pip(target, meanface_indices, target_map1, target_map2, target_map3, target_local_x, target_local_y, target_nb_x, target_nb_y):
87
+ num_nb = len(meanface_indices[0])
88
+ map_channel1, map_height1, map_width1 = target_map1.shape
89
+ map_channel2, map_height2, map_width2 = target_map2.shape
90
+ map_channel3, map_height3, map_width3 = target_map3.shape
91
+ target = target.reshape(-1, 2)
92
+ assert map_channel1 == target.shape[0]
93
+
94
+ for i in range(map_channel1):
95
+ mu_x1 = int(floor(target[i][0] * map_width1))
96
+ mu_y1 = int(floor(target[i][1] * map_height1))
97
+ mu_x1 = max(0, mu_x1)
98
+ mu_y1 = max(0, mu_y1)
99
+ mu_x1 = min(mu_x1, map_width1-1)
100
+ mu_y1 = min(mu_y1, map_height1-1)
101
+ target_map1[i, mu_y1, mu_x1] = 1
102
+
103
+ shift_x = target[i][0] * map_width1 - mu_x1
104
+ shift_y = target[i][1] * map_height1 - mu_y1
105
+ target_local_x[i, mu_y1, mu_x1] = shift_x
106
+ target_local_y[i, mu_y1, mu_x1] = shift_y
107
+
108
+ for j in range(num_nb):
109
+ nb_x = target[meanface_indices[i][j]][0] * map_width1 - mu_x1
110
+ nb_y = target[meanface_indices[i][j]][1] * map_height1 - mu_y1
111
+ target_nb_x[num_nb*i+j, mu_y1, mu_x1] = nb_x
112
+ target_nb_y[num_nb*i+j, mu_y1, mu_x1] = nb_y
113
+
114
+ mu_x2 = int(floor(target[i][0] * map_width2))
115
+ mu_y2 = int(floor(target[i][1] * map_height2))
116
+ mu_x2 = max(0, mu_x2)
117
+ mu_y2 = max(0, mu_y2)
118
+ mu_x2 = min(mu_x2, map_width2-1)
119
+ mu_y2 = min(mu_y2, map_height2-1)
120
+ target_map2[i, mu_y2, mu_x2] = 1
121
+
122
+ mu_x3 = int(floor(target[i][0] * map_width3))
123
+ mu_y3 = int(floor(target[i][1] * map_height3))
124
+ mu_x3 = max(0, mu_x3)
125
+ mu_y3 = max(0, mu_y3)
126
+ mu_x3 = min(mu_x3, map_width3-1)
127
+ mu_y3 = min(mu_y3, map_height3-1)
128
+ target_map3[i, mu_y3, mu_x3] = 1
129
+
130
+ return target_map1, target_map2, target_map3, target_local_x, target_local_y, target_nb_x, target_nb_y
131
+
132
+ def gen_target_pip_cls1(target, target_map1):
133
+ map_channel1, map_height1, map_width1 = target_map1.shape
134
+ target = target.reshape(-1, 2)
135
+ assert map_channel1 == target.shape[0]
136
+
137
+ for i in range(map_channel1):
138
+ mu_x1 = int(floor(target[i][0] * map_width1))
139
+ mu_y1 = int(floor(target[i][1] * map_height1))
140
+ mu_x1 = max(0, mu_x1)
141
+ mu_y1 = max(0, mu_y1)
142
+ mu_x1 = min(mu_x1, map_width1-1)
143
+ mu_y1 = min(mu_y1, map_height1-1)
144
+ target_map1[i, mu_y1, mu_x1] = 1
145
+
146
+ return target_map1
147
+
148
+ def gen_target_pip_cls2(target, target_map2):
149
+ map_channel2, map_height2, map_width2 = target_map2.shape
150
+ target = target.reshape(-1, 2)
151
+ assert map_channel2 == target.shape[0]
152
+
153
+ for i in range(map_channel2):
154
+ mu_x2 = int(floor(target[i][0] * map_width2))
155
+ mu_y2 = int(floor(target[i][1] * map_height2))
156
+ mu_x2 = max(0, mu_x2)
157
+ mu_y2 = max(0, mu_y2)
158
+ mu_x2 = min(mu_x2, map_width2-1)
159
+ mu_y2 = min(mu_y2, map_height2-1)
160
+ target_map2[i, mu_y2, mu_x2] = 1
161
+
162
+ return target_map2
163
+
164
+ def gen_target_pip_cls3(target, target_map3):
165
+ map_channel3, map_height3, map_width3 = target_map3.shape
166
+ target = target.reshape(-1, 2)
167
+ assert map_channel3 == target.shape[0]
168
+
169
+ for i in range(map_channel3):
170
+ mu_x3 = int(floor(target[i][0] * map_width3))
171
+ mu_y3 = int(floor(target[i][1] * map_height3))
172
+ mu_x3 = max(0, mu_x3)
173
+ mu_y3 = max(0, mu_y3)
174
+ mu_x3 = min(mu_x3, map_width3-1)
175
+ mu_y3 = min(mu_y3, map_height3-1)
176
+ target_map3[i, mu_y3, mu_x3] = 1
177
+
178
+ return target_map3
179
+
180
+ class ImageFolder_pip(data.Dataset):
181
+ def __init__(self, root, imgs, input_size, num_lms, net_stride, points_flip, meanface_indices, transform=None, target_transform=None):
182
+ self.root = root
183
+ self.imgs = imgs
184
+ self.num_lms = num_lms
185
+ self.net_stride = net_stride
186
+ self.points_flip = points_flip
187
+ self.meanface_indices = meanface_indices
188
+ self.num_nb = len(meanface_indices[0])
189
+ self.transform = transform
190
+ self.target_transform = target_transform
191
+ self.input_size = input_size
192
+
193
+ def __getitem__(self, index):
194
+ """
195
+ Args:
196
+ index (int): Index
197
+ Returns:
198
+ tuple: (image, target) where target is class_index of the target class.
199
+ """
200
+ img_name, target_type, target = self.imgs[index]
201
+ img = Image.open(os.path.join(self.root, img_name)).convert('RGB')
202
+
203
+ img, target = random_translate(img, target)
204
+ img = random_occlusion(img)
205
+ img, target = random_flip(img, target, self.points_flip)
206
+ img, target = random_rotate(img, target, 30)
207
+ img = random_blur(img)
208
+
209
+ target_map1 = np.zeros((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
210
+ target_map2 = np.zeros((self.num_lms, int(self.input_size/self.net_stride/2), int(self.input_size/self.net_stride/2)))
211
+ target_map3 = np.zeros((self.num_lms, int(self.input_size/self.net_stride/4), int(self.input_size/self.net_stride/4)))
212
+ target_local_x = np.zeros((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
213
+ target_local_y = np.zeros((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
214
+ target_nb_x = np.zeros((self.num_nb*self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
215
+ target_nb_y = np.zeros((self.num_nb*self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
216
+
217
+ mask_map1 = np.ones((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
218
+ mask_map2 = np.ones((self.num_lms, int(self.input_size/self.net_stride/2), int(self.input_size/self.net_stride/2)))
219
+ mask_map3 = np.ones((self.num_lms, int(self.input_size/self.net_stride/4), int(self.input_size/self.net_stride/4)))
220
+ mask_local_x = np.ones((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
221
+ mask_local_y = np.ones((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
222
+ mask_nb_x = np.ones((self.num_nb*self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
223
+ mask_nb_y = np.ones((self.num_nb*self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
224
+
225
+ if target_type == 'std':
226
+ target_map1, target_map2, target_map3, target_local_x, target_local_y, target_nb_x, target_nb_y = gen_target_pip(target, self.meanface_indices, target_map1, target_map2, target_map3, target_local_x, target_local_y, target_nb_x, target_nb_y)
227
+ mask_map2 = np.zeros((self.num_lms, int(self.input_size/self.net_stride/2), int(self.input_size/self.net_stride/2)))
228
+             mask_map3 = np.zeros((self.num_lms, int(self.input_size/self.net_stride/4), int(self.input_size/self.net_stride/4)))
+         elif target_type == 'cls1':
+             target_map1 = gen_target_pip_cls1(target, target_map1)
+             mask_map2 = np.zeros((self.num_lms, int(self.input_size/self.net_stride/2), int(self.input_size/self.net_stride/2)))
+             mask_map3 = np.zeros((self.num_lms, int(self.input_size/self.net_stride/4), int(self.input_size/self.net_stride/4)))
+             mask_local_x = np.zeros((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+             mask_local_y = np.zeros((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+             mask_nb_x = np.zeros((self.num_nb*self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+             mask_nb_y = np.zeros((self.num_nb*self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+         elif target_type == 'cls2':
+             target_map2 = gen_target_pip_cls2(target, target_map2)
+             mask_map1 = np.zeros((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+             mask_map3 = np.zeros((self.num_lms, int(self.input_size/self.net_stride/4), int(self.input_size/self.net_stride/4)))
+             mask_local_x = np.zeros((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+             mask_local_y = np.zeros((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+             mask_nb_x = np.zeros((self.num_nb*self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+             mask_nb_y = np.zeros((self.num_nb*self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+         elif target_type == 'cls3':
+             target_map3 = gen_target_pip_cls3(target, target_map3)
+             mask_map1 = np.zeros((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+             mask_map2 = np.zeros((self.num_lms, int(self.input_size/self.net_stride/2), int(self.input_size/self.net_stride/2)))
+             mask_local_x = np.zeros((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+             mask_local_y = np.zeros((self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+             mask_nb_x = np.zeros((self.num_nb*self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+             mask_nb_y = np.zeros((self.num_nb*self.num_lms, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
+         else:
+             print('No such target type!')
+             exit(0)
+
+         target_map1 = torch.from_numpy(target_map1).float()
+         target_map2 = torch.from_numpy(target_map2).float()
+         target_map3 = torch.from_numpy(target_map3).float()
+         target_local_x = torch.from_numpy(target_local_x).float()
+         target_local_y = torch.from_numpy(target_local_y).float()
+         target_nb_x = torch.from_numpy(target_nb_x).float()
+         target_nb_y = torch.from_numpy(target_nb_y).float()
+         mask_map1 = torch.from_numpy(mask_map1).float()
+         mask_map2 = torch.from_numpy(mask_map2).float()
+         mask_map3 = torch.from_numpy(mask_map3).float()
+         mask_local_x = torch.from_numpy(mask_local_x).float()
+         mask_local_y = torch.from_numpy(mask_local_y).float()
+         mask_nb_x = torch.from_numpy(mask_nb_x).float()
+         mask_nb_y = torch.from_numpy(mask_nb_y).float()
+
+         if self.transform is not None:
+             img = self.transform(img)
+         if self.target_transform is not None:
+             target_map1 = self.target_transform(target_map1)
+             target_map2 = self.target_transform(target_map2)
+             target_map3 = self.target_transform(target_map3)
+             target_local_x = self.target_transform(target_local_x)
+             target_local_y = self.target_transform(target_local_y)
+             target_nb_x = self.target_transform(target_nb_x)
+             target_nb_y = self.target_transform(target_nb_y)
+
+         return img, target_map1, target_map2, target_map3, target_local_x, target_local_y, target_nb_x, target_nb_y, mask_map1, mask_map2, mask_map3, mask_local_x, mask_local_y, mask_nb_x, mask_nb_y
+
+     def __len__(self):
+         return len(self.imgs)
+
+ if __name__ == '__main__':
+     pass
+
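For reference, the three map resolutions zeroed out above follow directly from input_size and net_stride; a minimal sketch of the resulting grid sizes, assuming the input_size=256 / net_stride=32 defaults used elsewhere in this commit:

# Sketch of the three PIP classification-map sizes above
# (assumes input_size=256 and net_stride=32, as configured in lib/demo.py).
input_size, net_stride = 256, 32
map1 = input_size // net_stride        # 8  -> 8x8 grid for cls1
map2 = input_size // net_stride // 2   # 4  -> 4x4 grid for cls2
map3 = input_size // net_stride // 4   # 2  -> 2x2 grid for cls3
print(map1, map2, map3)  # 8 4 2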
third_party/PIPNet/lib/demo.py ADDED
@@ -0,0 +1,159 @@
+ import cv2
+ import sys
+
+ sys.path.insert(0, "FaceBoxesV2")
+ sys.path.insert(0, "..")
+ from math import floor
+ from faceboxes_detector import *
+
+ import torch
+ import torch.nn.parallel
+ import torch.utils.data
+ import torchvision.transforms as transforms
+ import torchvision.models as models
+ from PIL import Image  # used by Image.fromarray in demo_image below
+
+ from networks import *
+ from functions import *
+ from PIPNet.reverse_index import ri1, ri2
+
+
+ class Config:
+     def __init__(self):
+         self.det_head = "pip"
+         self.net_stride = 32
+         self.batch_size = 16
+         self.init_lr = 0.0001
+         self.num_epochs = 60
+         self.decay_steps = [30, 50]
+         self.input_size = 256
+         self.backbone = "resnet101"
+         self.pretrained = True
+         self.criterion_cls = "l2"
+         self.criterion_reg = "l1"
+         self.cls_loss_weight = 10
+         self.reg_loss_weight = 1
+         self.num_lms = 98
+         self.save_interval = self.num_epochs
+         self.num_nb = 10
+         self.use_gpu = True
+         self.gpu_id = 3
+
+
+ def get_lmk_model():
+
+     cfg = Config()
+
+     resnet101 = models.resnet101(pretrained=cfg.pretrained)
+     net = Pip_resnet101(
+         resnet101,
+         cfg.num_nb,
+         num_lms=cfg.num_lms,
+         input_size=cfg.input_size,
+         net_stride=cfg.net_stride,
+     )
+
+     if cfg.use_gpu:
+         device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+     else:
+         device = torch.device("cpu")
+     net = net.to(device)
+
+     weight_file = "/apdcephfs/share_1290939/ahbanliang/codes/PIPNet/snapshots/WFLW/pip_32_16_60_r101_l2_l1_10_1_nb10/epoch59.pth"
+     state_dict = torch.load(weight_file, map_location=device)
+     net.load_state_dict(state_dict)
+
+     detector = FaceBoxesDetector(
+         "FaceBoxes",
+         "FaceBoxesV2/weights/FaceBoxesV2.pth",
+         use_gpu=device.type == "cuda",  # reuse the net's device so CPU-only runs work
+         device=device,
+     )
+     return net, detector
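The weight_file above is an absolute cluster path. A minimal sketch of loading a local copy of the same checkpoint instead, assuming it has been placed at weights/PIPNet/epoch59.pth (an assumed location; adjust to wherever the checkpoint actually lives):

# Hypothetical local-weights variant of the loading step above.
import os
import torch

weight_file = os.path.join("weights", "PIPNet", "epoch59.pth")  # assumed path
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
state_dict = torch.load(weight_file, map_location=device)
# net.load_state_dict(state_dict)  # net built exactly as in get_lmk_model()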
+
+
+ def demo_image(
+     image_file,
+     net,
+     detector,
+     input_size=256,
+     net_stride=32,
+     num_nb=10,
+     use_gpu=True,
+     device="cuda:0" if torch.cuda.is_available() else "cpu",
+ ):
+
+     my_thresh = 0.6
+     det_box_scale = 1.2
+     net.eval()
+     preprocess = transforms.Compose(
+         [
+             transforms.Resize((input_size, input_size)),
+             transforms.ToTensor(),
+             transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+         ]
+     )
+     reverse_index1, reverse_index2, max_len = ri1, ri2, 17
+     image = cv2.imread(image_file)
+     image_height, image_width, _ = image.shape
+     detections, _ = detector.detect(image, my_thresh, 1)
+     for i in range(len(detections)):
+         det_xmin = detections[i][2]
+         det_ymin = detections[i][3]
+         det_width = detections[i][4]
+         det_height = detections[i][5]
+         det_xmax = det_xmin + det_width - 1
+         det_ymax = det_ymin + det_height - 1
+
+         det_xmin -= int(det_width * (det_box_scale - 1) / 2)
+         # remove a part of top area for alignment, see paper for details
+         det_ymin += int(det_height * (det_box_scale - 1) / 2)
+         det_xmax += int(det_width * (det_box_scale - 1) / 2)
+         det_ymax += int(det_height * (det_box_scale - 1) / 2)
+         det_xmin = max(det_xmin, 0)
+         det_ymin = max(det_ymin, 0)
+         det_xmax = min(det_xmax, image_width - 1)
+         det_ymax = min(det_ymax, image_height - 1)
+         det_width = det_xmax - det_xmin + 1
+         det_height = det_ymax - det_ymin + 1
+         cv2.rectangle(image, (det_xmin, det_ymin), (det_xmax, det_ymax), (0, 0, 255), 2)
+         det_crop = image[det_ymin:det_ymax, det_xmin:det_xmax, :]
+         det_crop = cv2.resize(det_crop, (input_size, input_size))
+         inputs = Image.fromarray(det_crop[:, :, ::-1].astype("uint8"), "RGB")
+         inputs = preprocess(inputs).unsqueeze(0)
+         inputs = inputs.to(device)
+         (
+             lms_pred_x,
+             lms_pred_y,
+             lms_pred_nb_x,
+             lms_pred_nb_y,
+             outputs_cls,
+             max_cls,
+         ) = forward_pip(net, inputs, preprocess, input_size, net_stride, num_nb)
+         lms_pred = torch.cat((lms_pred_x, lms_pred_y), dim=1).flatten()
+         tmp_nb_x = lms_pred_nb_x[reverse_index1, reverse_index2].view(98, max_len)
+         tmp_nb_y = lms_pred_nb_y[reverse_index1, reverse_index2].view(98, max_len)
+         tmp_x = torch.mean(torch.cat((lms_pred_x, tmp_nb_x), dim=1), dim=1).view(-1, 1)
+         tmp_y = torch.mean(torch.cat((lms_pred_y, tmp_nb_y), dim=1), dim=1).view(-1, 1)
+         lms_pred_merge = torch.cat((tmp_x, tmp_y), dim=1).flatten()
+         lms_pred = lms_pred.cpu().numpy()
+         lms_pred_merge = lms_pred_merge.cpu().numpy()
+         for j in range(98):  # renamed from i to avoid shadowing the detection index
+             x_pred = lms_pred_merge[j * 2] * det_width
+             y_pred = lms_pred_merge[j * 2 + 1] * det_height
+             cv2.circle(
+                 image,
+                 (int(x_pred) + det_xmin, int(y_pred) + det_ymin),
+                 1,
+                 (0, 0, 255),
+                 2,
+             )
+     cv2.imwrite("images/1_out.jpg", image)
+
+
+ if __name__ == "__main__":
+     net, detector = get_lmk_model()
+     demo_image(
+         "/apdcephfs/private_ahbanliang/codes/Real-ESRGAN-master/tmp_frames/yanikefu/frame00000046.png",
+         net,
+         detector,
+     )
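demo_image draws the detection box and the 98 merged landmarks onto the input and writes it to images/1_out.jpg; a minimal usage sketch (the input path here is hypothetical):

# Hypothetical usage of the demo above on a local image.
net, detector = get_lmk_model()
demo_image("images/1.jpg", net, detector)  # writes images/1_out.jpg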
third_party/PIPNet/lib/demo_video.py ADDED
@@ -0,0 +1,141 @@
+ import cv2, os
+ import sys
+ sys.path.insert(0, 'FaceBoxesV2')
+ sys.path.insert(0, '..')
+ import numpy as np
+ import pickle
+ import importlib
+ from math import floor
+ from faceboxes_detector import *
+ import time
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.parallel
+ import torch.optim as optim
+ import torch.utils.data
+ import torch.nn.functional as F
+ import torchvision.transforms as transforms
+ import torchvision.datasets as datasets
+ import torchvision.models as models
+
+ from networks import *
+ import data_utils
+ from functions import *
+
+ if not len(sys.argv) == 3:
+     print('Format:')
+     print('python lib/demo_video.py config_file video_file')
+     exit(0)
+ experiment_name = sys.argv[1].split('/')[-1][:-3]
+ data_name = sys.argv[1].split('/')[-2]
+ config_path = '.experiments.{}.{}'.format(data_name, experiment_name)
+ video_file = sys.argv[2]
+
+ my_config = importlib.import_module(config_path, package='PIPNet')
+ Config = getattr(my_config, 'Config')
+ cfg = Config()
+ cfg.experiment_name = experiment_name
+ cfg.data_name = data_name
+
+ save_dir = os.path.join('./snapshots', cfg.data_name, cfg.experiment_name)
+
+ meanface_indices, reverse_index1, reverse_index2, max_len = get_meanface(os.path.join('data', cfg.data_name, 'meanface.txt'), cfg.num_nb)
+
+ if cfg.backbone == 'resnet18':
+     resnet18 = models.resnet18(pretrained=cfg.pretrained)
+     net = Pip_resnet18(resnet18, cfg.num_nb, num_lms=cfg.num_lms, input_size=cfg.input_size, net_stride=cfg.net_stride)
+ elif cfg.backbone == 'resnet50':
+     resnet50 = models.resnet50(pretrained=cfg.pretrained)
+     net = Pip_resnet50(resnet50, cfg.num_nb, num_lms=cfg.num_lms, input_size=cfg.input_size, net_stride=cfg.net_stride)
+ elif cfg.backbone == 'resnet101':
+     resnet101 = models.resnet101(pretrained=cfg.pretrained)
+     net = Pip_resnet101(resnet101, cfg.num_nb, num_lms=cfg.num_lms, input_size=cfg.input_size, net_stride=cfg.net_stride)
+ else:
+     print('No such backbone!')
+     exit(0)
+
+ if cfg.use_gpu:
+     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+ else:
+     device = torch.device("cpu")
+ net = net.to(device)
+
+ weight_file = os.path.join(save_dir, 'epoch%d.pth' % (cfg.num_epochs-1))
+ state_dict = torch.load(weight_file, map_location=device)
+ net.load_state_dict(state_dict)
+
+ normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                  std=[0.229, 0.224, 0.225])
+ preprocess = transforms.Compose([transforms.Resize((cfg.input_size, cfg.input_size)), transforms.ToTensor(), normalize])
+
+ def demo_video(video_file, net, preprocess, input_size, net_stride, num_nb, use_gpu, device):
+     detector = FaceBoxesDetector('FaceBoxes', 'FaceBoxesV2/weights/FaceBoxesV2.pth', use_gpu, device)
+     my_thresh = 0.9
+     det_box_scale = 1.2
+
+     net.eval()
+     if video_file == 'camera':
+         cap = cv2.VideoCapture(0)
+     else:
+         cap = cv2.VideoCapture(video_file)
+     if not cap.isOpened():
+         print("Error opening video stream or file")
+     frame_width = int(cap.get(3))
+     frame_height = int(cap.get(4))
+     count = 0
+     while cap.isOpened():
+         ret, frame = cap.read()
+         if ret:
+             detections, _ = detector.detect(frame, my_thresh, 1)
+             for i in range(len(detections)):
+                 det_xmin = detections[i][2]
+                 det_ymin = detections[i][3]
+                 det_width = detections[i][4]
+                 det_height = detections[i][5]
+                 det_xmax = det_xmin + det_width - 1
+                 det_ymax = det_ymin + det_height - 1
+
+                 det_xmin -= int(det_width * (det_box_scale-1)/2)
+                 # remove a part of top area for alignment, see paper for details
+                 det_ymin += int(det_height * (det_box_scale-1)/2)
+                 det_xmax += int(det_width * (det_box_scale-1)/2)
+                 det_ymax += int(det_height * (det_box_scale-1)/2)
+                 det_xmin = max(det_xmin, 0)
+                 det_ymin = max(det_ymin, 0)
+                 det_xmax = min(det_xmax, frame_width-1)
+                 det_ymax = min(det_ymax, frame_height-1)
+                 det_width = det_xmax - det_xmin + 1
+                 det_height = det_ymax - det_ymin + 1
+                 cv2.rectangle(frame, (det_xmin, det_ymin), (det_xmax, det_ymax), (0, 0, 255), 2)
+                 det_crop = frame[det_ymin:det_ymax, det_xmin:det_xmax, :]
+                 det_crop = cv2.resize(det_crop, (input_size, input_size))
+                 inputs = Image.fromarray(det_crop[:,:,::-1].astype('uint8'), 'RGB')
+                 inputs = preprocess(inputs).unsqueeze(0)
+                 inputs = inputs.to(device)
+                 lms_pred_x, lms_pred_y, lms_pred_nb_x, lms_pred_nb_y, outputs_cls, max_cls = forward_pip(net, inputs, preprocess, input_size, net_stride, num_nb)
+                 lms_pred = torch.cat((lms_pred_x, lms_pred_y), dim=1).flatten()
+                 tmp_nb_x = lms_pred_nb_x[reverse_index1, reverse_index2].view(cfg.num_lms, max_len)
+                 tmp_nb_y = lms_pred_nb_y[reverse_index1, reverse_index2].view(cfg.num_lms, max_len)
+                 tmp_x = torch.mean(torch.cat((lms_pred_x, tmp_nb_x), dim=1), dim=1).view(-1,1)
+                 tmp_y = torch.mean(torch.cat((lms_pred_y, tmp_nb_y), dim=1), dim=1).view(-1,1)
+                 lms_pred_merge = torch.cat((tmp_x, tmp_y), dim=1).flatten()
+                 lms_pred = lms_pred.cpu().numpy()
+                 lms_pred_merge = lms_pred_merge.cpu().numpy()
+                 for j in range(cfg.num_lms):  # renamed from i to avoid shadowing the detection index
+                     x_pred = lms_pred_merge[j*2] * det_width
+                     y_pred = lms_pred_merge[j*2+1] * det_height
+                     cv2.circle(frame, (int(x_pred)+det_xmin, int(y_pred)+det_ymin), 1, (0, 0, 255), 2)
+
+             count += 1
+             #cv2.imwrite('video_out2/'+str(count)+'.jpg', frame)
+             cv2.imshow('1', frame)
+             if cv2.waitKey(1) & 0xFF == ord('q'):
+                 break
+         else:
+             break
+
+     cap.release()
+     cv2.destroyAllWindows()
+
+ demo_video(video_file, net, preprocess, cfg.input_size, cfg.net_stride, cfg.num_nb, cfg.use_gpu, device)
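The script takes a config file and a video path (or the literal string 'camera' for a webcam). A usage sketch; the experiment path here is an assumption based on the snapshot directory name used in lib/demo.py:

# Hypothetical invocations (config path assumed, not confirmed by this commit):
# python lib/demo_video.py experiments/WFLW/pip_32_16_60_r101_l2_l1_10_1_nb10.py camera
# python lib/demo_video.py experiments/WFLW/pip_32_16_60_r101_l2_l1_10_1_nb10.py my_clip.mp4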
third_party/PIPNet/lib/functions.py ADDED
@@ -0,0 +1,210 @@
+ import os, cv2
+ import numpy as np
+ from PIL import Image, ImageFilter
+ import logging
+ import torch
+ import torch.nn as nn
+ import random
+ import time
+ from scipy.integrate import simps
+
+
+ def get_label(data_name, label_file, task_type=None):
+     label_path = os.path.join('data', data_name, label_file)
+     with open(label_path, 'r') as f:
+         labels = f.readlines()
+     labels = [x.strip().split() for x in labels]
+     if len(labels[0])==1:
+         return labels
+
+     labels_new = []
+     for label in labels:
+         image_name = label[0]
+         target = label[1:]
+         target = np.array([float(x) for x in target])
+         if task_type is None:
+             labels_new.append([image_name, target])
+         else:
+             labels_new.append([image_name, task_type, target])
+     return labels_new
+
+ def get_meanface(meanface_file, num_nb):
+     with open(meanface_file) as f:
+         meanface = f.readlines()[0]
+
+     meanface = meanface.strip().split()
+     meanface = [float(x) for x in meanface]
+     meanface = np.array(meanface).reshape(-1, 2)
+     # each landmark predicts num_nb neighbors
+     meanface_indices = []
+     for i in range(meanface.shape[0]):
+         pt = meanface[i,:]
+         dists = np.sum(np.power(pt-meanface, 2), axis=1)
+         indices = np.argsort(dists)
+         meanface_indices.append(indices[1:1+num_nb])
+
+     # each landmark predicted by X neighbors, X varies
+     meanface_indices_reversed = {}
+     for i in range(meanface.shape[0]):
+         meanface_indices_reversed[i] = [[],[]]
+     for i in range(meanface.shape[0]):
+         for j in range(num_nb):
+             meanface_indices_reversed[meanface_indices[i][j]][0].append(i)
+             meanface_indices_reversed[meanface_indices[i][j]][1].append(j)
+
+     max_len = 0
+     for i in range(meanface.shape[0]):
+         tmp_len = len(meanface_indices_reversed[i][0])
+         if tmp_len > max_len:
+             max_len = tmp_len
+
+     # tricks, make them have equal length for efficient computation
+     for i in range(meanface.shape[0]):
+         tmp_len = len(meanface_indices_reversed[i][0])
+         meanface_indices_reversed[i][0] += meanface_indices_reversed[i][0]*10
+         meanface_indices_reversed[i][1] += meanface_indices_reversed[i][1]*10
+         meanface_indices_reversed[i][0] = meanface_indices_reversed[i][0][:max_len]
+         meanface_indices_reversed[i][1] = meanface_indices_reversed[i][1][:max_len]
+
+     # make the indices 1-dim
+     reverse_index1 = []
+     reverse_index2 = []
+     for i in range(meanface.shape[0]):
+         reverse_index1 += meanface_indices_reversed[i][0]
+         reverse_index2 += meanface_indices_reversed[i][1]
+     return meanface_indices, reverse_index1, reverse_index2, max_len
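get_meanface picks, for each landmark, its num_nb nearest neighbors on the mean shape by squared distance. A toy sketch of that nearest-neighbor step on made-up 2-D points:

# Toy illustration of the neighbor selection above (made-up points).
import numpy as np

pts = np.array([[0.1, 0.1], [0.2, 0.1], [0.9, 0.9], [0.15, 0.12]])
num_nb = 2
for i, pt in enumerate(pts):
    dists = np.sum((pt - pts) ** 2, axis=1)
    nb = np.argsort(dists)[1:1 + num_nb]  # index 0 is the point itself, so skip it
    print(i, nb)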
+ def compute_loss_pip(outputs_map, outputs_local_x, outputs_local_y, outputs_nb_x, outputs_nb_y, labels_map, labels_local_x, labels_local_y, labels_nb_x, labels_nb_y, criterion_cls, criterion_reg, num_nb):
+
+     tmp_batch, tmp_channel, tmp_height, tmp_width = outputs_map.size()
+     labels_map = labels_map.view(tmp_batch*tmp_channel, -1)
+     labels_max_ids = torch.argmax(labels_map, 1)
+     labels_max_ids = labels_max_ids.view(-1, 1)
+     labels_max_ids_nb = labels_max_ids.repeat(1, num_nb).view(-1, 1)
+
+     outputs_local_x = outputs_local_x.view(tmp_batch*tmp_channel, -1)
+     outputs_local_x_select = torch.gather(outputs_local_x, 1, labels_max_ids)
+     outputs_local_y = outputs_local_y.view(tmp_batch*tmp_channel, -1)
+     outputs_local_y_select = torch.gather(outputs_local_y, 1, labels_max_ids)
+     outputs_nb_x = outputs_nb_x.view(tmp_batch*num_nb*tmp_channel, -1)
+     outputs_nb_x_select = torch.gather(outputs_nb_x, 1, labels_max_ids_nb)
+     outputs_nb_y = outputs_nb_y.view(tmp_batch*num_nb*tmp_channel, -1)
+     outputs_nb_y_select = torch.gather(outputs_nb_y, 1, labels_max_ids_nb)
+
+     labels_local_x = labels_local_x.view(tmp_batch*tmp_channel, -1)
+     labels_local_x_select = torch.gather(labels_local_x, 1, labels_max_ids)
+     labels_local_y = labels_local_y.view(tmp_batch*tmp_channel, -1)
+     labels_local_y_select = torch.gather(labels_local_y, 1, labels_max_ids)
+     labels_nb_x = labels_nb_x.view(tmp_batch*num_nb*tmp_channel, -1)
+     labels_nb_x_select = torch.gather(labels_nb_x, 1, labels_max_ids_nb)
+     labels_nb_y = labels_nb_y.view(tmp_batch*num_nb*tmp_channel, -1)
+     labels_nb_y_select = torch.gather(labels_nb_y, 1, labels_max_ids_nb)
+
+     labels_map = labels_map.view(tmp_batch, tmp_channel, tmp_height, tmp_width)
+     loss_map = criterion_cls(outputs_map, labels_map)
+     loss_x = criterion_reg(outputs_local_x_select, labels_local_x_select)
+     loss_y = criterion_reg(outputs_local_y_select, labels_local_y_select)
+     loss_nb_x = criterion_reg(outputs_nb_x_select, labels_nb_x_select)
+     loss_nb_y = criterion_reg(outputs_nb_y_select, labels_nb_y_select)
+     return loss_map, loss_x, loss_y, loss_nb_x, loss_nb_y
+
+ def train_model(det_head, net, train_loader, criterion_cls, criterion_reg, cls_loss_weight, reg_loss_weight, num_nb, optimizer, num_epochs, scheduler, save_dir, save_interval, device):
+     for epoch in range(num_epochs):
+         print('Epoch {}/{}'.format(epoch, num_epochs - 1))
+         logging.info('Epoch {}/{}'.format(epoch, num_epochs - 1))
+         print('-' * 10)
+         logging.info('-' * 10)
+         net.train()
+         epoch_loss = 0.0
+
+         for i, data in enumerate(train_loader):
+             if det_head == 'pip':
+                 inputs, labels_map, labels_x, labels_y, labels_nb_x, labels_nb_y = data
+                 inputs = inputs.to(device)
+                 labels_map = labels_map.to(device)
+                 labels_x = labels_x.to(device)
+                 labels_y = labels_y.to(device)
+                 labels_nb_x = labels_nb_x.to(device)
+                 labels_nb_y = labels_nb_y.to(device)
+                 outputs_map, outputs_x, outputs_y, outputs_nb_x, outputs_nb_y = net(inputs)
+                 loss_map, loss_x, loss_y, loss_nb_x, loss_nb_y = compute_loss_pip(outputs_map, outputs_x, outputs_y, outputs_nb_x, outputs_nb_y, labels_map, labels_x, labels_y, labels_nb_x, labels_nb_y, criterion_cls, criterion_reg, num_nb)
+                 loss = cls_loss_weight*loss_map + reg_loss_weight*loss_x + reg_loss_weight*loss_y + reg_loss_weight*loss_nb_x + reg_loss_weight*loss_nb_y
+             else:
+                 print('No such head:', det_head)
+                 exit(0)
+
+             optimizer.zero_grad()
+             loss.backward()
+             optimizer.step()
+             if i%10 == 0:
+                 if det_head == 'pip':
+                     print('[Epoch {:d}/{:d}, Batch {:d}/{:d}] <Total loss: {:.6f}> <map loss: {:.6f}> <x loss: {:.6f}> <y loss: {:.6f}> <nbx loss: {:.6f}> <nby loss: {:.6f}>'.format(
+                         epoch, num_epochs-1, i, len(train_loader)-1, loss.item(), cls_loss_weight*loss_map.item(), reg_loss_weight*loss_x.item(), reg_loss_weight*loss_y.item(), reg_loss_weight*loss_nb_x.item(), reg_loss_weight*loss_nb_y.item()))
+                     logging.info('[Epoch {:d}/{:d}, Batch {:d}/{:d}] <Total loss: {:.6f}> <map loss: {:.6f}> <x loss: {:.6f}> <y loss: {:.6f}> <nbx loss: {:.6f}> <nby loss: {:.6f}>'.format(
+                         epoch, num_epochs-1, i, len(train_loader)-1, loss.item(), cls_loss_weight*loss_map.item(), reg_loss_weight*loss_x.item(), reg_loss_weight*loss_y.item(), reg_loss_weight*loss_nb_x.item(), reg_loss_weight*loss_nb_y.item()))
+                 else:
+                     print('No such head:', det_head)
+                     exit(0)
+             epoch_loss += loss.item()
+         epoch_loss /= len(train_loader)
+         if epoch%(save_interval-1) == 0 and epoch > 0:
+             filename = os.path.join(save_dir, 'epoch%d.pth' % epoch)
+             torch.save(net.state_dict(), filename)
+             print(filename, 'saved')
+         scheduler.step()
+     return net
+
+ def forward_pip(net, inputs, preprocess, input_size, net_stride, num_nb):
+     net.eval()
+     with torch.no_grad():
+         outputs_cls, outputs_x, outputs_y, outputs_nb_x, outputs_nb_y = net(inputs)
+         tmp_batch, tmp_channel, tmp_height, tmp_width = outputs_cls.size()
+         assert tmp_batch == 1
+
+         outputs_cls = outputs_cls.view(tmp_batch*tmp_channel, -1)
+         max_ids = torch.argmax(outputs_cls, 1)
+         max_cls = torch.max(outputs_cls, 1)[0]
+         max_ids = max_ids.view(-1, 1)
+         max_ids_nb = max_ids.repeat(1, num_nb).view(-1, 1)
+
+         outputs_x = outputs_x.view(tmp_batch*tmp_channel, -1)
+         outputs_x_select = torch.gather(outputs_x, 1, max_ids)
+         outputs_x_select = outputs_x_select.squeeze(1)
+         outputs_y = outputs_y.view(tmp_batch*tmp_channel, -1)
+         outputs_y_select = torch.gather(outputs_y, 1, max_ids)
+         outputs_y_select = outputs_y_select.squeeze(1)
+
+         outputs_nb_x = outputs_nb_x.view(tmp_batch*num_nb*tmp_channel, -1)
+         outputs_nb_x_select = torch.gather(outputs_nb_x, 1, max_ids_nb)
+         outputs_nb_x_select = outputs_nb_x_select.squeeze(1).view(-1, num_nb)
+         outputs_nb_y = outputs_nb_y.view(tmp_batch*num_nb*tmp_channel, -1)
+         outputs_nb_y_select = torch.gather(outputs_nb_y, 1, max_ids_nb)
+         outputs_nb_y_select = outputs_nb_y_select.squeeze(1).view(-1, num_nb)
+
+         tmp_x = (max_ids%tmp_width).view(-1,1).float()+outputs_x_select.view(-1,1)
+         tmp_y = (max_ids//tmp_width).view(-1,1).float()+outputs_y_select.view(-1,1)
+         tmp_x /= 1.0 * input_size / net_stride
+         tmp_y /= 1.0 * input_size / net_stride
+
+         tmp_nb_x = (max_ids%tmp_width).view(-1,1).float()+outputs_nb_x_select
+         tmp_nb_y = (max_ids//tmp_width).view(-1,1).float()+outputs_nb_y_select
+         tmp_nb_x = tmp_nb_x.view(-1, num_nb)
+         tmp_nb_y = tmp_nb_y.view(-1, num_nb)
+         tmp_nb_x /= 1.0 * input_size / net_stride
+         tmp_nb_y /= 1.0 * input_size / net_stride
+
+     return tmp_x, tmp_y, tmp_nb_x, tmp_nb_y, outputs_cls, max_cls
+
+ def compute_nme(lms_pred, lms_gt, norm):
+     lms_pred = lms_pred.reshape((-1, 2))
+     lms_gt = lms_gt.reshape((-1, 2))
+     nme = np.mean(np.linalg.norm(lms_pred - lms_gt, axis=1)) / norm
+     return nme
+
+ def compute_fr_and_auc(nmes, thres=0.1, step=0.0001):
+     num_data = len(nmes)
+     xs = np.arange(0, thres + step, step)
+     ys = np.array([np.count_nonzero(nmes <= x) for x in xs]) / float(num_data)
+     fr = 1.0 - ys[-1]
+     auc = simps(ys, x=xs) / thres
+     return fr, auc
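forward_pip turns a heatmap argmax plus the regressed sub-grid offsets into coordinates normalized to [0, 1]. A worked numeric sketch of that arithmetic (made-up values, on the 8x8 grid that input_size=256 with net_stride=32 produces):

# Worked example of the coordinate decoding in forward_pip (made-up values).
input_size, net_stride, grid_w = 256, 32, 8
max_id = 19               # argmax over the flattened 8x8 heatmap
offset_x, offset_y = 0.3, -0.2
x = (max_id % grid_w + offset_x) / (input_size / net_stride)   # (3 + 0.3) / 8
y = (max_id // grid_w + offset_y) / (input_size / net_stride)  # (2 - 0.2) / 8
print(x, y)  # 0.4125 0.225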
third_party/PIPNet/lib/functions_gssl.py ADDED
@@ -0,0 +1,241 @@
+ import os, cv2
+ import numpy as np
+ from PIL import Image, ImageFilter
+ import logging
+ import torch
+ import torch.nn as nn
+ import random
+
+ def get_label(data_name, label_file, task_type=None):
+     label_path = os.path.join('data', data_name, label_file)
+     with open(label_path, 'r') as f:
+         labels = f.readlines()
+     labels = [x.strip().split() for x in labels]
+     if len(labels[0])==1:
+         return labels
+
+     labels_new = []
+     for label in labels:
+         image_name = label[0]
+         target = label[1:]
+         target = np.array([float(x) for x in target])
+         if task_type is None:
+             labels_new.append([image_name, target])
+         else:
+             labels_new.append([image_name, task_type, target])
+     return labels_new
+
+ def get_meanface(meanface_file, num_nb):
+     with open(meanface_file) as f:
+         meanface = f.readlines()[0]
+
+     meanface = meanface.strip().split()
+     meanface = [float(x) for x in meanface]
+     meanface = np.array(meanface).reshape(-1, 2)
+     # each landmark predicts num_nb neighbors
+     meanface_indices = []
+     for i in range(meanface.shape[0]):
+         pt = meanface[i,:]
+         dists = np.sum(np.power(pt-meanface, 2), axis=1)
+         indices = np.argsort(dists)
+         meanface_indices.append(indices[1:1+num_nb])
+
+     # each landmark predicted by X neighbors, X varies
+     meanface_indices_reversed = {}
+     for i in range(meanface.shape[0]):
+         meanface_indices_reversed[i] = [[],[]]
+     for i in range(meanface.shape[0]):
+         for j in range(num_nb):
+             meanface_indices_reversed[meanface_indices[i][j]][0].append(i)
+             meanface_indices_reversed[meanface_indices[i][j]][1].append(j)
+
+     max_len = 0
+     for i in range(meanface.shape[0]):
+         tmp_len = len(meanface_indices_reversed[i][0])
+         if tmp_len > max_len:
+             max_len = tmp_len
+
+     # tricks, make them have equal length for efficient computation
+     for i in range(meanface.shape[0]):
+         tmp_len = len(meanface_indices_reversed[i][0])
+         meanface_indices_reversed[i][0] += meanface_indices_reversed[i][0]*10
+         meanface_indices_reversed[i][1] += meanface_indices_reversed[i][1]*10
+         meanface_indices_reversed[i][0] = meanface_indices_reversed[i][0][:max_len]
+         meanface_indices_reversed[i][1] = meanface_indices_reversed[i][1][:max_len]
+
+     # make the indices 1-dim
+     reverse_index1 = []
+     reverse_index2 = []
+     for i in range(meanface.shape[0]):
+         reverse_index1 += meanface_indices_reversed[i][0]
+         reverse_index2 += meanface_indices_reversed[i][1]
+     return meanface_indices, reverse_index1, reverse_index2, max_len
+
+ def compute_loss_pip(outputs_map1, outputs_map2, outputs_map3, outputs_local_x, outputs_local_y, outputs_nb_x, outputs_nb_y, labels_map1, labels_map2, labels_map3, labels_local_x, labels_local_y, labels_nb_x, labels_nb_y, masks_map1, masks_map2, masks_map3, masks_local_x, masks_local_y, masks_nb_x, masks_nb_y, criterion_cls, criterion_reg, num_nb):
+
+     tmp_batch, tmp_channel, tmp_height, tmp_width = outputs_map1.size()
+     labels_map1 = labels_map1.view(tmp_batch*tmp_channel, -1)
+     labels_max_ids = torch.argmax(labels_map1, 1)
+     labels_max_ids = labels_max_ids.view(-1, 1)
+     labels_max_ids_nb = labels_max_ids.repeat(1, num_nb).view(-1, 1)
+
+     outputs_local_x = outputs_local_x.view(tmp_batch*tmp_channel, -1)
+     outputs_local_x_select = torch.gather(outputs_local_x, 1, labels_max_ids)
+     outputs_local_y = outputs_local_y.view(tmp_batch*tmp_channel, -1)
+     outputs_local_y_select = torch.gather(outputs_local_y, 1, labels_max_ids)
+     outputs_nb_x = outputs_nb_x.view(tmp_batch*num_nb*tmp_channel, -1)
+     outputs_nb_x_select = torch.gather(outputs_nb_x, 1, labels_max_ids_nb)
+     outputs_nb_y = outputs_nb_y.view(tmp_batch*num_nb*tmp_channel, -1)
+     outputs_nb_y_select = torch.gather(outputs_nb_y, 1, labels_max_ids_nb)
+
+     labels_local_x = labels_local_x.view(tmp_batch*tmp_channel, -1)
+     labels_local_x_select = torch.gather(labels_local_x, 1, labels_max_ids)
+     labels_local_y = labels_local_y.view(tmp_batch*tmp_channel, -1)
+     labels_local_y_select = torch.gather(labels_local_y, 1, labels_max_ids)
+     labels_nb_x = labels_nb_x.view(tmp_batch*num_nb*tmp_channel, -1)
+     labels_nb_x_select = torch.gather(labels_nb_x, 1, labels_max_ids_nb)
+     labels_nb_y = labels_nb_y.view(tmp_batch*num_nb*tmp_channel, -1)
+     labels_nb_y_select = torch.gather(labels_nb_y, 1, labels_max_ids_nb)
+
+     masks_local_x = masks_local_x.view(tmp_batch*tmp_channel, -1)
+     masks_local_x_select = torch.gather(masks_local_x, 1, labels_max_ids)
+     masks_local_y = masks_local_y.view(tmp_batch*tmp_channel, -1)
+     masks_local_y_select = torch.gather(masks_local_y, 1, labels_max_ids)
+     masks_nb_x = masks_nb_x.view(tmp_batch*num_nb*tmp_channel, -1)
+     masks_nb_x_select = torch.gather(masks_nb_x, 1, labels_max_ids_nb)
+     masks_nb_y = masks_nb_y.view(tmp_batch*num_nb*tmp_channel, -1)
+     masks_nb_y_select = torch.gather(masks_nb_y, 1, labels_max_ids_nb)
+
+     ##########################################
+     outputs_map1 = outputs_map1.view(tmp_batch*tmp_channel, -1)
+     outputs_map2 = outputs_map2.view(tmp_batch*tmp_channel, -1)
+     outputs_map3 = outputs_map3.view(tmp_batch*tmp_channel, -1)
+     labels_map2 = labels_map2.view(tmp_batch*tmp_channel, -1)
+     labels_map3 = labels_map3.view(tmp_batch*tmp_channel, -1)
+     masks_map1 = masks_map1.view(tmp_batch*tmp_channel, -1)
+     masks_map2 = masks_map2.view(tmp_batch*tmp_channel, -1)
+     masks_map3 = masks_map3.view(tmp_batch*tmp_channel, -1)
+     outputs_map = torch.cat([outputs_map1, outputs_map2, outputs_map3], 1)
+     labels_map = torch.cat([labels_map1, labels_map2, labels_map3], 1)
+     masks_map = torch.cat([masks_map1, masks_map2, masks_map3], 1)
+     loss_map = criterion_cls(outputs_map*masks_map, labels_map*masks_map)
+     if not masks_map.sum() == 0:
+         loss_map /= masks_map.sum()
+     ##########################################
+
+     loss_x = criterion_reg(outputs_local_x_select*masks_local_x_select, labels_local_x_select*masks_local_x_select)
+     if not masks_local_x_select.sum() == 0:
+         loss_x /= masks_local_x_select.sum()
+     loss_y = criterion_reg(outputs_local_y_select*masks_local_y_select, labels_local_y_select*masks_local_y_select)
+     if not masks_local_y_select.sum() == 0:
+         loss_y /= masks_local_y_select.sum()
+     loss_nb_x = criterion_reg(outputs_nb_x_select*masks_nb_x_select, labels_nb_x_select*masks_nb_x_select)
+     if not masks_nb_x_select.sum() == 0:
+         loss_nb_x /= masks_nb_x_select.sum()
+     loss_nb_y = criterion_reg(outputs_nb_y_select*masks_nb_y_select, labels_nb_y_select*masks_nb_y_select)
+     if not masks_nb_y_select.sum() == 0:
+         loss_nb_y /= masks_nb_y_select.sum()
+     return loss_map, loss_x, loss_y, loss_nb_x, loss_nb_y
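The GSSL loss masks out heads that carry no label for a given sample and then normalizes by the mask sum, so unlabeled samples contribute nothing. A minimal sketch of that pattern in isolation (assuming a sum-reduced criterion, as the division by mask.sum() implies):

# Minimal sketch of the mask-and-normalize pattern used above.
import torch

criterion = torch.nn.MSELoss(reduction='sum')
outputs = torch.randn(4, 10)
labels = torch.randn(4, 10)
mask = torch.zeros(4, 10)
mask[:2] = 1  # only the first two samples carry supervision
loss = criterion(outputs * mask, labels * mask)
if mask.sum() != 0:
    loss = loss / mask.sum()
print(loss)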
+ def train_model(det_head, net, train_loader, criterion_cls, criterion_reg, cls_loss_weight, reg_loss_weight, num_nb, optimizer, num_epochs, scheduler, save_dir, save_interval, device):
+     for epoch in range(num_epochs):
+         print('Epoch {}/{}'.format(epoch, num_epochs - 1))
+         logging.info('Epoch {}/{}'.format(epoch, num_epochs - 1))
+         print('-' * 10)
+         logging.info('-' * 10)
+         net.train()
+         epoch_loss = 0.0
+
+         for i, data in enumerate(train_loader):
+             if det_head == 'pip':
+                 inputs, labels_map1, labels_map2, labels_map3, labels_x, labels_y, labels_nb_x, labels_nb_y, masks_map1, masks_map2, masks_map3, masks_x, masks_y, masks_nb_x, masks_nb_y = data
+                 inputs = inputs.to(device)
+                 labels_map1 = labels_map1.to(device)
+                 labels_map2 = labels_map2.to(device)
+                 labels_map3 = labels_map3.to(device)
+                 labels_x = labels_x.to(device)
+                 labels_y = labels_y.to(device)
+                 labels_nb_x = labels_nb_x.to(device)
+                 labels_nb_y = labels_nb_y.to(device)
+                 masks_map1 = masks_map1.to(device)
+                 masks_map2 = masks_map2.to(device)
+                 masks_map3 = masks_map3.to(device)
+                 masks_x = masks_x.to(device)
+                 masks_y = masks_y.to(device)
+                 masks_nb_x = masks_nb_x.to(device)
+                 masks_nb_y = masks_nb_y.to(device)
+                 outputs_map1, outputs_map2, outputs_map3, outputs_x, outputs_y, outputs_nb_x, outputs_nb_y = net(inputs)
+                 loss_map, loss_x, loss_y, loss_nb_x, loss_nb_y = compute_loss_pip(outputs_map1, outputs_map2, outputs_map3, outputs_x, outputs_y, outputs_nb_x, outputs_nb_y, labels_map1, labels_map2, labels_map3, labels_x, labels_y, labels_nb_x, labels_nb_y, masks_map1, masks_map2, masks_map3, masks_x, masks_y, masks_nb_x, masks_nb_y, criterion_cls, criterion_reg, num_nb)
+                 loss = cls_loss_weight*loss_map + reg_loss_weight*loss_x + reg_loss_weight*loss_y + reg_loss_weight*loss_nb_x + reg_loss_weight*loss_nb_y
+             else:
+                 print('No such head:', det_head)
+                 exit(0)
+
+             optimizer.zero_grad()
+             loss.backward()
+             optimizer.step()
+             if i%10 == 0:
+                 if det_head == 'pip':
+                     print('[Epoch {:d}/{:d}, Batch {:d}/{:d}] <Total loss: {:.6f}> <map loss: {:.6f}> <x loss: {:.6f}> <y loss: {:.6f}> <nbx loss: {:.6f}> <nby loss: {:.6f}>'.format(
+                         epoch, num_epochs-1, i, len(train_loader)-1, loss.item(), cls_loss_weight*loss_map.item(), reg_loss_weight*loss_x.item(), reg_loss_weight*loss_y.item(), reg_loss_weight*loss_nb_x.item(), reg_loss_weight*loss_nb_y.item()))
+                     logging.info('[Epoch {:d}/{:d}, Batch {:d}/{:d}] <Total loss: {:.6f}> <map loss: {:.6f}> <x loss: {:.6f}> <y loss: {:.6f}> <nbx loss: {:.6f}> <nby loss: {:.6f}>'.format(
+                         epoch, num_epochs-1, i, len(train_loader)-1, loss.item(), cls_loss_weight*loss_map.item(), reg_loss_weight*loss_x.item(), reg_loss_weight*loss_y.item(), reg_loss_weight*loss_nb_x.item(), reg_loss_weight*loss_nb_y.item()))
+                 else:
+                     print('No such head:', det_head)
+                     exit(0)
+             epoch_loss += loss.item()
+         epoch_loss /= len(train_loader)
+         if epoch%(save_interval-1) == 0 and epoch > 0:
+             filename = os.path.join(save_dir, 'epoch%d.pth' % epoch)
+             torch.save(net.state_dict(), filename)
+             print(filename, 'saved')
+         scheduler.step()
+     return net
+
+ def forward_pip(net, inputs, preprocess, input_size, net_stride, num_nb):
+     net.eval()
+     with torch.no_grad():
+         outputs_cls1, outputs_cls2, outputs_cls3, outputs_x, outputs_y, outputs_nb_x, outputs_nb_y = net(inputs)
+         tmp_batch, tmp_channel, tmp_height, tmp_width = outputs_cls1.size()
+         assert tmp_batch == 1
+
+         outputs_cls1 = outputs_cls1.view(tmp_batch*tmp_channel, -1)
+         max_ids = torch.argmax(outputs_cls1, 1)
+         max_cls = torch.max(outputs_cls1, 1)[0]
+         max_ids = max_ids.view(-1, 1)
+         max_ids_nb = max_ids.repeat(1, num_nb).view(-1, 1)
+
+         outputs_x = outputs_x.view(tmp_batch*tmp_channel, -1)
+         outputs_x_select = torch.gather(outputs_x, 1, max_ids)
+         outputs_x_select = outputs_x_select.squeeze(1)
+         outputs_y = outputs_y.view(tmp_batch*tmp_channel, -1)
+         outputs_y_select = torch.gather(outputs_y, 1, max_ids)
+         outputs_y_select = outputs_y_select.squeeze(1)
+
+         outputs_nb_x = outputs_nb_x.view(tmp_batch*num_nb*tmp_channel, -1)
+         outputs_nb_x_select = torch.gather(outputs_nb_x, 1, max_ids_nb)
+         outputs_nb_x_select = outputs_nb_x_select.squeeze(1).view(-1, num_nb)
+         outputs_nb_y = outputs_nb_y.view(tmp_batch*num_nb*tmp_channel, -1)
+         outputs_nb_y_select = torch.gather(outputs_nb_y, 1, max_ids_nb)
+         outputs_nb_y_select = outputs_nb_y_select.squeeze(1).view(-1, num_nb)
+
+         tmp_x = (max_ids%tmp_width).view(-1,1).float()+outputs_x_select.view(-1,1)
+         tmp_y = (max_ids//tmp_width).view(-1,1).float()+outputs_y_select.view(-1,1)
+         tmp_x /= 1.0 * input_size / net_stride
+         tmp_y /= 1.0 * input_size / net_stride
+
+         tmp_nb_x = (max_ids%tmp_width).view(-1,1).float()+outputs_nb_x_select
+         tmp_nb_y = (max_ids//tmp_width).view(-1,1).float()+outputs_nb_y_select
+         tmp_nb_x = tmp_nb_x.view(-1, num_nb)
+         tmp_nb_y = tmp_nb_y.view(-1, num_nb)
+         tmp_nb_x /= 1.0 * input_size / net_stride
+         tmp_nb_y /= 1.0 * input_size / net_stride
+
+     return tmp_x, tmp_y, tmp_nb_x, tmp_nb_y, [outputs_cls1, outputs_cls2, outputs_cls3], max_cls
+
+ def compute_nme(lms_pred, lms_gt, norm):
+     lms_pred = lms_pred.reshape((-1, 2))
+     lms_gt = lms_gt.reshape((-1, 2))
+     nme = np.mean(np.linalg.norm(lms_pred - lms_gt, axis=1)) / norm
+     return nme
+
third_party/PIPNet/lib/mobilenetv3.py ADDED
@@ -0,0 +1,233 @@
+ """
+ Creates a MobileNetV3 Model as defined in:
+ Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V. Le, Hartwig Adam. (2019).
+ Searching for MobileNetV3
+ arXiv preprint arXiv:1905.02244.
+ """
+
+ import torch
+ import torch.nn as nn
+ import math
+
+
+ __all__ = ['mobilenetv3_large', 'mobilenetv3_small']
+
+
+ def _make_divisible(v, divisor, min_value=None):
+     """
+     This function is taken from the original tf repo.
+     It ensures that all layers have a channel number that is divisible by 8
+     It can be seen here:
+     https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
+     :param v:
+     :param divisor:
+     :param min_value:
+     :return:
+     """
+     if min_value is None:
+         min_value = divisor
+     new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
+     # Make sure that round down does not go down by more than 10%.
+     if new_v < 0.9 * v:
+         new_v += divisor
+     return new_v
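_make_divisible rounds a channel count to a multiple of divisor while guaranteeing the result never drops more than 10% below the requested value; two worked examples:

# Worked examples of _make_divisible.
print(_make_divisible(37, 8))  # 40: round-half-up to the nearest multiple of 8
print(_make_divisible(12, 8))  # 16: 12 + 4 rounds up, and the 10% floor holds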
+
+
+ class h_sigmoid(nn.Module):
+     def __init__(self, inplace=True):
+         super(h_sigmoid, self).__init__()
+         self.relu = nn.ReLU6(inplace=inplace)
+
+     def forward(self, x):
+         return self.relu(x + 3) / 6
+
+
+ class h_swish(nn.Module):
+     def __init__(self, inplace=True):
+         super(h_swish, self).__init__()
+         self.sigmoid = h_sigmoid(inplace=inplace)
+
+     def forward(self, x):
+         return x * self.sigmoid(x)
+
+
+ class SELayer(nn.Module):
+     def __init__(self, channel, reduction=4):
+         super(SELayer, self).__init__()
+         self.avg_pool = nn.AdaptiveAvgPool2d(1)
+         self.fc = nn.Sequential(
+             nn.Linear(channel, _make_divisible(channel // reduction, 8)),
+             nn.ReLU(inplace=True),
+             nn.Linear(_make_divisible(channel // reduction, 8), channel),
+             h_sigmoid()
+         )
+
+     def forward(self, x):
+         b, c, _, _ = x.size()
+         y = self.avg_pool(x).view(b, c)
+         y = self.fc(y).view(b, c, 1, 1)
+         return x * y
+
+
+ def conv_3x3_bn(inp, oup, stride):
+     return nn.Sequential(
+         nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
+         nn.BatchNorm2d(oup),
+         h_swish()
+     )
+
+
+ def conv_1x1_bn(inp, oup):
+     return nn.Sequential(
+         nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
+         nn.BatchNorm2d(oup),
+         h_swish()
+     )
+
+
+ class InvertedResidual(nn.Module):
+     def __init__(self, inp, hidden_dim, oup, kernel_size, stride, use_se, use_hs):
+         super(InvertedResidual, self).__init__()
+         assert stride in [1, 2]
+
+         self.identity = stride == 1 and inp == oup
+
+         if inp == hidden_dim:
+             self.conv = nn.Sequential(
+                 # dw
+                 nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim, bias=False),
+                 nn.BatchNorm2d(hidden_dim),
+                 h_swish() if use_hs else nn.ReLU(inplace=True),
+                 # Squeeze-and-Excite
+                 SELayer(hidden_dim) if use_se else nn.Identity(),
+                 # pw-linear
+                 nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
+                 nn.BatchNorm2d(oup),
+             )
+         else:
+             self.conv = nn.Sequential(
+                 # pw
+                 nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
+                 nn.BatchNorm2d(hidden_dim),
+                 h_swish() if use_hs else nn.ReLU(inplace=True),
+                 # dw
+                 nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim, bias=False),
+                 nn.BatchNorm2d(hidden_dim),
+                 # Squeeze-and-Excite
+                 SELayer(hidden_dim) if use_se else nn.Identity(),
+                 h_swish() if use_hs else nn.ReLU(inplace=True),
+                 # pw-linear
+                 nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
+                 nn.BatchNorm2d(oup),
+             )
+
+     def forward(self, x):
+         if self.identity:
+             return x + self.conv(x)
+         else:
+             return self.conv(x)
+
+
+ class MobileNetV3(nn.Module):
+     def __init__(self, cfgs, mode, num_classes=1000, width_mult=1.):
+         super(MobileNetV3, self).__init__()
+         # setting of inverted residual blocks
+         self.cfgs = cfgs
+         assert mode in ['large', 'small']
+
+         # building first layer
+         input_channel = _make_divisible(16 * width_mult, 8)
+         layers = [conv_3x3_bn(3, input_channel, 2)]
+         # building inverted residual blocks
+         block = InvertedResidual
+         for k, t, c, use_se, use_hs, s in self.cfgs:
+             output_channel = _make_divisible(c * width_mult, 8)
+             exp_size = _make_divisible(input_channel * t, 8)
+             layers.append(block(input_channel, exp_size, output_channel, k, s, use_se, use_hs))
+             input_channel = output_channel
+         self.features = nn.Sequential(*layers)
+         # building last several layers
+         self.conv = conv_1x1_bn(input_channel, exp_size)
+         self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+         output_channel = {'large': 1280, 'small': 1024}
+         output_channel = _make_divisible(output_channel[mode] * width_mult, 8) if width_mult > 1.0 else output_channel[mode]
+         self.classifier = nn.Sequential(
+             nn.Linear(exp_size, output_channel),
+             h_swish(),
+             nn.Dropout(0.2),
+             nn.Linear(output_channel, num_classes),
+         )
+
+         self._initialize_weights()
+
+     def forward(self, x):
+         x = self.features(x)
+         x = self.conv(x)
+         x = self.avgpool(x)
+         x = x.view(x.size(0), -1)
+         x = self.classifier(x)
+         return x
+
+     def _initialize_weights(self):
+         for m in self.modules():
+             if isinstance(m, nn.Conv2d):
+                 n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+                 m.weight.data.normal_(0, math.sqrt(2. / n))
+                 if m.bias is not None:
+                     m.bias.data.zero_()
+             elif isinstance(m, nn.BatchNorm2d):
+                 m.weight.data.fill_(1)
+                 m.bias.data.zero_()
+             elif isinstance(m, nn.Linear):
+                 m.weight.data.normal_(0, 0.01)
+                 m.bias.data.zero_()
+
+
+ def mobilenetv3_large(**kwargs):
+     """
+     Constructs a MobileNetV3-Large model
+     """
+     cfgs = [
+         # k, t, c, SE, HS, s
+         [3, 1, 16, 0, 0, 1],
+         [3, 4, 24, 0, 0, 2],
+         [3, 3, 24, 0, 0, 1],
+         [5, 3, 40, 1, 0, 2],
+         [5, 3, 40, 1, 0, 1],
+         [5, 3, 40, 1, 0, 1],
+         [3, 6, 80, 0, 1, 2],
+         [3, 2.5, 80, 0, 1, 1],
+         [3, 2.3, 80, 0, 1, 1],
+         [3, 2.3, 80, 0, 1, 1],
+         [3, 6, 112, 1, 1, 1],
+         [3, 6, 112, 1, 1, 1],
+         [5, 6, 160, 1, 1, 2],
+         [5, 6, 160, 1, 1, 1],
+         [5, 6, 160, 1, 1, 1]
+     ]
+     return MobileNetV3(cfgs, mode='large', **kwargs)
+
+
+ def mobilenetv3_small(**kwargs):
+     """
+     Constructs a MobileNetV3-Small model
+     """
+     cfgs = [
+         # k, t, c, SE, HS, s
+         [3, 1, 16, 1, 0, 2],
+         [3, 4.5, 24, 0, 0, 2],
+         [3, 3.67, 24, 0, 0, 1],
+         [5, 4, 40, 1, 1, 2],
+         [5, 6, 40, 1, 1, 1],
+         [5, 6, 40, 1, 1, 1],
+         [5, 3, 48, 1, 1, 1],
+         [5, 3, 48, 1, 1, 1],
+         [5, 6, 96, 1, 1, 2],
+         [5, 6, 96, 1, 1, 1],
+         [5, 6, 96, 1, 1, 1],
+     ]
+
+     return MobileNetV3(cfgs, mode='small', **kwargs)
+
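A minimal smoke test of the constructors above (shapes only, random weights):

# Minimal smoke test of the MobileNetV3 constructors above.
import torch

model = mobilenetv3_small(num_classes=1000)
out = model(torch.randn(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 1000])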
third_party/PIPNet/lib/networks.py ADDED
@@ -0,0 +1,415 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torchvision.models as models
+ import numpy as np
+
+ # net_stride output_size
+ # 128 2x2
+ # 64 4x4
+ # 32 8x8
+ # pip regression, resnet101
+ class Pip_resnet101(nn.Module):
+     def __init__(self, resnet, num_nb, num_lms=68, input_size=256, net_stride=32):
+         super(Pip_resnet101, self).__init__()
+         self.num_nb = num_nb
+         self.num_lms = num_lms
+         self.input_size = input_size
+         self.net_stride = net_stride
+         self.conv1 = resnet.conv1
+         self.bn1 = resnet.bn1
+         self.maxpool = resnet.maxpool
+         self.sigmoid = nn.Sigmoid()
+         self.layer1 = resnet.layer1
+         self.layer2 = resnet.layer2
+         self.layer3 = resnet.layer3
+         self.layer4 = resnet.layer4
+         if self.net_stride == 128:
+             self.layer5 = nn.Conv2d(2048, 512, kernel_size=3, stride=2, padding=1)
+             self.bn5 = nn.BatchNorm2d(512)
+             self.layer6 = nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1)
+             self.bn6 = nn.BatchNorm2d(512)
+             # init
+             nn.init.normal_(self.layer5.weight, std=0.001)
+             if self.layer5.bias is not None:
+                 nn.init.constant_(self.layer5.bias, 0)
+             nn.init.constant_(self.bn5.weight, 1)
+             nn.init.constant_(self.bn5.bias, 0)
+
+             nn.init.normal_(self.layer6.weight, std=0.001)
+             if self.layer6.bias is not None:
+                 nn.init.constant_(self.layer6.bias, 0)
+             nn.init.constant_(self.bn6.weight, 1)
+             nn.init.constant_(self.bn6.bias, 0)
+         elif self.net_stride == 64:
+             self.layer5 = nn.Conv2d(2048, 512, kernel_size=3, stride=2, padding=1)
+             self.bn5 = nn.BatchNorm2d(512)
+             # init
+             nn.init.normal_(self.layer5.weight, std=0.001)
+             if self.layer5.bias is not None:
+                 nn.init.constant_(self.layer5.bias, 0)
+             nn.init.constant_(self.bn5.weight, 1)
+             nn.init.constant_(self.bn5.bias, 0)
+         elif self.net_stride == 32:
+             pass
+         else:
+             print('No such net_stride!')
+             exit(0)
+
+         self.cls_layer = nn.Conv2d(2048, num_lms, kernel_size=1, stride=1, padding=0)
+         self.x_layer = nn.Conv2d(2048, num_lms, kernel_size=1, stride=1, padding=0)
+         self.y_layer = nn.Conv2d(2048, num_lms, kernel_size=1, stride=1, padding=0)
+         self.nb_x_layer = nn.Conv2d(2048, num_nb*num_lms, kernel_size=1, stride=1, padding=0)
+         self.nb_y_layer = nn.Conv2d(2048, num_nb*num_lms, kernel_size=1, stride=1, padding=0)
+
+         nn.init.normal_(self.cls_layer.weight, std=0.001)
+         if self.cls_layer.bias is not None:
+             nn.init.constant_(self.cls_layer.bias, 0)
+
+         nn.init.normal_(self.x_layer.weight, std=0.001)
+         if self.x_layer.bias is not None:
+             nn.init.constant_(self.x_layer.bias, 0)
+
+         nn.init.normal_(self.y_layer.weight, std=0.001)
+         if self.y_layer.bias is not None:
+             nn.init.constant_(self.y_layer.bias, 0)
+
+         nn.init.normal_(self.nb_x_layer.weight, std=0.001)
+         if self.nb_x_layer.bias is not None:
+             nn.init.constant_(self.nb_x_layer.bias, 0)
+
+         nn.init.normal_(self.nb_y_layer.weight, std=0.001)
+         if self.nb_y_layer.bias is not None:
+             nn.init.constant_(self.nb_y_layer.bias, 0)
+
+     def forward(self, x):
+         x = self.conv1(x)
+         x = self.bn1(x)
+         x = F.relu(x)
+         x = self.maxpool(x)
+         x = self.layer1(x)
+         x = self.layer2(x)
+         x = self.layer3(x)
+         x = self.layer4(x)
+         if self.net_stride == 128:
+             x = F.relu(self.bn5(self.layer5(x)))
+             x = F.relu(self.bn6(self.layer6(x)))
+         elif self.net_stride == 64:
+             x = F.relu(self.bn5(self.layer5(x)))
+         else:
+             pass
+         x1 = self.cls_layer(x)
+         x2 = self.x_layer(x)
+         x3 = self.y_layer(x)
+         x4 = self.nb_x_layer(x)
+         x5 = self.nb_y_layer(x)
+         return x1, x2, x3, x4, x5
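As the net_stride table at the top of this file notes, net_stride=32 turns a 256x256 input into 8x8 output maps; a shape sketch for the resnet101 head (randomly initialized, so no pretrained download is needed):

# Shape sketch for Pip_resnet101 with the WFLW-style 98-landmark setup.
import torch
import torchvision.models as models

net = Pip_resnet101(models.resnet101(pretrained=False), num_nb=10, num_lms=98,
                    input_size=256, net_stride=32)
x1, x2, x3, x4, x5 = net(torch.randn(1, 3, 256, 256))
print(x1.shape)  # torch.Size([1, 98, 8, 8])   -- one heatmap per landmark
print(x4.shape)  # torch.Size([1, 980, 8, 8])  -- num_nb * num_lms neighbor offsets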
107
+
108
+ # net_stride output_size
109
+ # 128 2x2
110
+ # 64 4x4
111
+ # 32 8x8
112
+ # pip regression, resnet50
113
+ class Pip_resnet50(nn.Module):
114
+ def __init__(self, resnet, num_nb, num_lms=68, input_size=256, net_stride=32):
115
+ super(Pip_resnet50, self).__init__()
116
+ self.num_nb = num_nb
117
+ self.num_lms = num_lms
118
+ self.input_size = input_size
119
+ self.net_stride = net_stride
120
+ self.conv1 = resnet.conv1
121
+ self.bn1 = resnet.bn1
122
+ self.maxpool = resnet.maxpool
123
+ self.sigmoid = nn.Sigmoid()
124
+ self.layer1 = resnet.layer1
125
+ self.layer2 = resnet.layer2
126
+ self.layer3 = resnet.layer3
127
+ self.layer4 = resnet.layer4
128
+ if self.net_stride == 128:
129
+ self.layer5 = nn.Conv2d(2048, 512, kernel_size=3, stride=2, padding=1)
130
+ self.bn5 = nn.BatchNorm2d(512)
131
+ self.layer6 = nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1)
132
+ self.bn6 = nn.BatchNorm2d(512)
133
+ # init
134
+ nn.init.normal_(self.layer5.weight, std=0.001)
135
+ if self.layer5.bias is not None:
136
+ nn.init.constant_(self.layer5.bias, 0)
137
+ nn.init.constant_(self.bn5.weight, 1)
138
+ nn.init.constant_(self.bn5.bias, 0)
139
+
140
+ nn.init.normal_(self.layer6.weight, std=0.001)
141
+ if self.layer6.bias is not None:
142
+ nn.init.constant_(self.layer6.bias, 0)
143
+ nn.init.constant_(self.bn6.weight, 1)
144
+ nn.init.constant_(self.bn6.bias, 0)
145
+ elif self.net_stride == 64:
146
+ self.layer5 = nn.Conv2d(2048, 512, kernel_size=3, stride=2, padding=1)
147
+ self.bn5 = nn.BatchNorm2d(512)
148
+ # init
149
+ nn.init.normal_(self.layer5.weight, std=0.001)
150
+ if self.layer5.bias is not None:
151
+ nn.init.constant_(self.layer5.bias, 0)
152
+ nn.init.constant_(self.bn5.weight, 1)
153
+ nn.init.constant_(self.bn5.bias, 0)
154
+ elif self.net_stride == 32:
155
+ pass
156
+ else:
157
+ print('No such net_stride!')
158
+ exit(0)
159
+
160
+ self.cls_layer = nn.Conv2d(2048, num_lms, kernel_size=1, stride=1, padding=0)
161
+ self.x_layer = nn.Conv2d(2048, num_lms, kernel_size=1, stride=1, padding=0)
162
+ self.y_layer = nn.Conv2d(2048, num_lms, kernel_size=1, stride=1, padding=0)
163
+ self.nb_x_layer = nn.Conv2d(2048, num_nb*num_lms, kernel_size=1, stride=1, padding=0)
164
+ self.nb_y_layer = nn.Conv2d(2048, num_nb*num_lms, kernel_size=1, stride=1, padding=0)
165
+
166
+ nn.init.normal_(self.cls_layer.weight, std=0.001)
167
+ if self.cls_layer.bias is not None:
168
+ nn.init.constant_(self.cls_layer.bias, 0)
169
+
170
+ nn.init.normal_(self.x_layer.weight, std=0.001)
171
+ if self.x_layer.bias is not None:
172
+ nn.init.constant_(self.x_layer.bias, 0)
173
+
174
+ nn.init.normal_(self.y_layer.weight, std=0.001)
175
+ if self.y_layer.bias is not None:
176
+ nn.init.constant_(self.y_layer.bias, 0)
177
+
178
+ nn.init.normal_(self.nb_x_layer.weight, std=0.001)
179
+ if self.nb_x_layer.bias is not None:
180
+ nn.init.constant_(self.nb_x_layer.bias, 0)
181
+
182
+ nn.init.normal_(self.nb_y_layer.weight, std=0.001)
183
+ if self.nb_y_layer.bias is not None:
184
+ nn.init.constant_(self.nb_y_layer.bias, 0)
185
+
186
+ def forward(self, x):
187
+ x = self.conv1(x)
188
+ x = self.bn1(x)
189
+ x = F.relu(x)
190
+ x = self.maxpool(x)
191
+ x = self.layer1(x)
192
+ x = self.layer2(x)
193
+ x = self.layer3(x)
194
+ x = self.layer4(x)
195
+ if self.net_stride == 128:
196
+ x = F.relu(self.bn5(self.layer5(x)))
197
+ x = F.relu(self.bn6(self.layer6(x)))
198
+ elif self.net_stride == 64:
199
+ x = F.relu(self.bn5(self.layer5(x)))
200
+ else:
201
+ pass
202
+ x1 = self.cls_layer(x)
203
+ x2 = self.x_layer(x)
204
+ x3 = self.y_layer(x)
205
+ x4 = self.nb_x_layer(x)
206
+ x5 = self.nb_y_layer(x)
207
+ return x1, x2, x3, x4, x5
208
+
209
+ # net_stride output_size
210
+ # 128 2x2
211
+ # 64 4x4
212
+ # 32 8x8
213
+ # pip regression, resnet18
214
+ class Pip_resnet18(nn.Module):
215
+ def __init__(self, resnet, num_nb, num_lms=68, input_size=256, net_stride=32):
216
+ super(Pip_resnet18, self).__init__()
217
+ self.num_nb = num_nb
218
+ self.num_lms = num_lms
219
+ self.input_size = input_size
220
+ self.net_stride = net_stride
221
+ self.conv1 = resnet.conv1
222
+ self.bn1 = resnet.bn1
223
+ self.maxpool = resnet.maxpool
224
+ self.sigmoid = nn.Sigmoid()
225
+ self.layer1 = resnet.layer1
226
+ self.layer2 = resnet.layer2
227
+ self.layer3 = resnet.layer3
228
+ self.layer4 = resnet.layer4
229
+ if self.net_stride == 128:
230
+ self.layer5 = nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1)
231
+ self.bn5 = nn.BatchNorm2d(512)
232
+ self.layer6 = nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1)
233
+ self.bn6 = nn.BatchNorm2d(512)
234
+ # init
235
+ nn.init.normal_(self.layer5.weight, std=0.001)
236
+ if self.layer5.bias is not None:
237
+ nn.init.constant_(self.layer5.bias, 0)
238
+ nn.init.constant_(self.bn5.weight, 1)
239
+ nn.init.constant_(self.bn5.bias, 0)
240
+
241
+ nn.init.normal_(self.layer6.weight, std=0.001)
242
+ if self.layer6.bias is not None:
243
+ nn.init.constant_(self.layer6.bias, 0)
244
+            nn.init.constant_(self.bn6.weight, 1)
+            nn.init.constant_(self.bn6.bias, 0)
+        elif self.net_stride == 64:
+            self.layer5 = nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1)
+            self.bn5 = nn.BatchNorm2d(512)
+            # init
+            nn.init.normal_(self.layer5.weight, std=0.001)
+            if self.layer5.bias is not None:
+                nn.init.constant_(self.layer5.bias, 0)
+            nn.init.constant_(self.bn5.weight, 1)
+            nn.init.constant_(self.bn5.bias, 0)
+        elif self.net_stride == 32:
+            pass
+        elif self.net_stride == 16:
+            self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=4, stride=2, padding=1, bias=False)
+            self.bn_deconv1 = nn.BatchNorm2d(512)
+            nn.init.normal_(self.deconv1.weight, std=0.001)
+            if self.deconv1.bias is not None:
+                nn.init.constant_(self.deconv1.bias, 0)
+            nn.init.constant_(self.bn_deconv1.weight, 1)
+            nn.init.constant_(self.bn_deconv1.bias, 0)
+        else:
+            print('No such net_stride!')
+            exit(0)
+
+        self.cls_layer = nn.Conv2d(512, num_lms, kernel_size=1, stride=1, padding=0)
+        self.x_layer = nn.Conv2d(512, num_lms, kernel_size=1, stride=1, padding=0)
+        self.y_layer = nn.Conv2d(512, num_lms, kernel_size=1, stride=1, padding=0)
+        self.nb_x_layer = nn.Conv2d(512, num_nb*num_lms, kernel_size=1, stride=1, padding=0)
+        self.nb_y_layer = nn.Conv2d(512, num_nb*num_lms, kernel_size=1, stride=1, padding=0)
+
+        nn.init.normal_(self.cls_layer.weight, std=0.001)
+        if self.cls_layer.bias is not None:
+            nn.init.constant_(self.cls_layer.bias, 0)
+
+        nn.init.normal_(self.x_layer.weight, std=0.001)
+        if self.x_layer.bias is not None:
+            nn.init.constant_(self.x_layer.bias, 0)
+
+        nn.init.normal_(self.y_layer.weight, std=0.001)
+        if self.y_layer.bias is not None:
+            nn.init.constant_(self.y_layer.bias, 0)
+
+        nn.init.normal_(self.nb_x_layer.weight, std=0.001)
+        if self.nb_x_layer.bias is not None:
+            nn.init.constant_(self.nb_x_layer.bias, 0)
+
+        nn.init.normal_(self.nb_y_layer.weight, std=0.001)
+        if self.nb_y_layer.bias is not None:
+            nn.init.constant_(self.nb_y_layer.bias, 0)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = F.relu(x)
+        x = self.maxpool(x)
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+        if self.net_stride == 128:
+            x = F.relu(self.bn5(self.layer5(x)))
+            x = F.relu(self.bn6(self.layer6(x)))
+        elif self.net_stride == 64:
+            x = F.relu(self.bn5(self.layer5(x)))
+        elif self.net_stride == 16:
+            x = F.relu(self.bn_deconv1(self.deconv1(x)))
+        else:
+            pass
+        x1 = self.cls_layer(x)
+        x2 = self.x_layer(x)
+        x3 = self.y_layer(x)
+        x4 = self.nb_x_layer(x)
+        x5 = self.nb_y_layer(x)
+        return x1, x2, x3, x4, x5
+
+class Pip_mbnetv2(nn.Module):
+    def __init__(self, mbnet, num_nb, num_lms=68, input_size=256, net_stride=32):
+        super(Pip_mbnetv2, self).__init__()
+        self.num_nb = num_nb
+        self.num_lms = num_lms
+        self.input_size = input_size
+        self.net_stride = net_stride
+        self.features = mbnet.features
+        self.sigmoid = nn.Sigmoid()
+
+        self.cls_layer = nn.Conv2d(1280, num_lms, kernel_size=1, stride=1, padding=0)
+        self.x_layer = nn.Conv2d(1280, num_lms, kernel_size=1, stride=1, padding=0)
+        self.y_layer = nn.Conv2d(1280, num_lms, kernel_size=1, stride=1, padding=0)
+        self.nb_x_layer = nn.Conv2d(1280, num_nb*num_lms, kernel_size=1, stride=1, padding=0)
+        self.nb_y_layer = nn.Conv2d(1280, num_nb*num_lms, kernel_size=1, stride=1, padding=0)
+
+        nn.init.normal_(self.cls_layer.weight, std=0.001)
+        if self.cls_layer.bias is not None:
+            nn.init.constant_(self.cls_layer.bias, 0)
+
+        nn.init.normal_(self.x_layer.weight, std=0.001)
+        if self.x_layer.bias is not None:
+            nn.init.constant_(self.x_layer.bias, 0)
+
+        nn.init.normal_(self.y_layer.weight, std=0.001)
+        if self.y_layer.bias is not None:
+            nn.init.constant_(self.y_layer.bias, 0)
+
+        nn.init.normal_(self.nb_x_layer.weight, std=0.001)
+        if self.nb_x_layer.bias is not None:
+            nn.init.constant_(self.nb_x_layer.bias, 0)
+
+        nn.init.normal_(self.nb_y_layer.weight, std=0.001)
+        if self.nb_y_layer.bias is not None:
+            nn.init.constant_(self.nb_y_layer.bias, 0)
+
+    def forward(self, x):
+        x = self.features(x)
+        x1 = self.cls_layer(x)
+        x2 = self.x_layer(x)
+        x3 = self.y_layer(x)
+        x4 = self.nb_x_layer(x)
+        x5 = self.nb_y_layer(x)
+        return x1, x2, x3, x4, x5
+
+class Pip_mbnetv3(nn.Module):
+    def __init__(self, mbnet, num_nb, num_lms=68, input_size=256, net_stride=32):
+        super(Pip_mbnetv3, self).__init__()
+        self.num_nb = num_nb
+        self.num_lms = num_lms
+        self.input_size = input_size
+        self.net_stride = net_stride
+        self.features = mbnet.features
+        self.conv = mbnet.conv
+        self.sigmoid = nn.Sigmoid()
+
+        self.cls_layer = nn.Conv2d(960, num_lms, kernel_size=1, stride=1, padding=0)
+        self.x_layer = nn.Conv2d(960, num_lms, kernel_size=1, stride=1, padding=0)
+        self.y_layer = nn.Conv2d(960, num_lms, kernel_size=1, stride=1, padding=0)
+        self.nb_x_layer = nn.Conv2d(960, num_nb*num_lms, kernel_size=1, stride=1, padding=0)
+        self.nb_y_layer = nn.Conv2d(960, num_nb*num_lms, kernel_size=1, stride=1, padding=0)
+
+        nn.init.normal_(self.cls_layer.weight, std=0.001)
+        if self.cls_layer.bias is not None:
+            nn.init.constant_(self.cls_layer.bias, 0)
+
+        nn.init.normal_(self.x_layer.weight, std=0.001)
+        if self.x_layer.bias is not None:
+            nn.init.constant_(self.x_layer.bias, 0)
+
+        nn.init.normal_(self.y_layer.weight, std=0.001)
+        if self.y_layer.bias is not None:
+            nn.init.constant_(self.y_layer.bias, 0)
+
+        nn.init.normal_(self.nb_x_layer.weight, std=0.001)
+        if self.nb_x_layer.bias is not None:
+            nn.init.constant_(self.nb_x_layer.bias, 0)
+
+        nn.init.normal_(self.nb_y_layer.weight, std=0.001)
+        if self.nb_y_layer.bias is not None:
+            nn.init.constant_(self.nb_y_layer.bias, 0)
+
+    def forward(self, x):
+        x = self.features(x)
+        x = self.conv(x)
+        x1 = self.cls_layer(x)
+        x2 = self.x_layer(x)
+        x3 = self.y_layer(x)
+        x4 = self.nb_x_layer(x)
+        x5 = self.nb_y_layer(x)
+        return x1, x2, x3, x4, x5
+
+
+if __name__ == '__main__':
+    pass
+
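Note (illustrative sketch, not part of this commit): every Pip_* head above returns five maps per forward pass — a landmark score map plus per-cell x/y offsets and num_nb neighbor offsets. The repo's actual decoding lives in lib/functions.py (forward_pip) and also averages the neighbor predictions; a minimal decode that ignores the neighbor maps (x4, x5) could look like:

    import torch

    def decode_heads(x1, x2, x3, net_stride=32, input_size=256):
        # x1: (N, num_lms, H, W) score map; x2/x3: per-cell x/y offsets
        n, num_lms, h, w = x1.size()
        idx = x1.view(n, num_lms, -1).argmax(dim=2)          # best grid cell per landmark
        grid_y = (idx // w).float()
        grid_x = (idx % w).float()
        off_x = x2.view(n, num_lms, -1).gather(2, idx.unsqueeze(2)).squeeze(2)
        off_y = x3.view(n, num_lms, -1).gather(2, idx.unsqueeze(2)).squeeze(2)
        # coordinates normalized to [0, 1] inside the input crop
        lms_x = (grid_x + off_x) * net_stride / input_size
        lms_y = (grid_y + off_y) * net_stride / input_size
        return torch.stack([lms_x, lms_y], dim=2)            # (N, num_lms, 2)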
third_party/PIPNet/lib/networks_gssl.py ADDED
@@ -0,0 +1,80 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torchvision.models as models
+import numpy as np
+import time
+
+# net_stride output_size
+# 128        2x2
+# 64         4x4
+# 32         8x8
+# pip regression, resnet18, for GSSL
+class Pip_resnet18(nn.Module):
+    def __init__(self, resnet, num_nb, num_lms=68, input_size=256, net_stride=32):
+        super(Pip_resnet18, self).__init__()
+        self.num_nb = num_nb
+        self.num_lms = num_lms
+        self.input_size = input_size
+        self.net_stride = net_stride
+        self.conv1 = resnet.conv1
+        self.bn1 = resnet.bn1
+        self.maxpool = resnet.maxpool
+        self.sigmoid = nn.Sigmoid()
+        self.layer1 = resnet.layer1
+        self.layer2 = resnet.layer2
+        self.layer3 = resnet.layer3
+        self.layer4 = resnet.layer4
+
+        self.my_maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
+
+        self.cls_layer = nn.Conv2d(512, num_lms, kernel_size=1, stride=1, padding=0)
+        self.x_layer = nn.Conv2d(512, num_lms, kernel_size=1, stride=1, padding=0)
+        self.y_layer = nn.Conv2d(512, num_lms, kernel_size=1, stride=1, padding=0)
+        self.nb_x_layer = nn.Conv2d(512, num_nb*num_lms, kernel_size=1, stride=1, padding=0)
+        self.nb_y_layer = nn.Conv2d(512, num_nb*num_lms, kernel_size=1, stride=1, padding=0)
+
+        # init
+        nn.init.normal_(self.cls_layer.weight, std=0.001)
+        if self.cls_layer.bias is not None:
+            nn.init.constant_(self.cls_layer.bias, 0)
+
+        nn.init.normal_(self.x_layer.weight, std=0.001)
+        if self.x_layer.bias is not None:
+            nn.init.constant_(self.x_layer.bias, 0)
+
+        nn.init.normal_(self.y_layer.weight, std=0.001)
+        if self.y_layer.bias is not None:
+            nn.init.constant_(self.y_layer.bias, 0)
+
+        nn.init.normal_(self.nb_x_layer.weight, std=0.001)
+        if self.nb_x_layer.bias is not None:
+            nn.init.constant_(self.nb_x_layer.bias, 0)
+
+        nn.init.normal_(self.nb_y_layer.weight, std=0.001)
+        if self.nb_y_layer.bias is not None:
+            nn.init.constant_(self.nb_y_layer.bias, 0)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = F.relu(x)
+        x = self.maxpool(x)
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+        cls1 = self.cls_layer(x)
+        offset_x = self.x_layer(x)
+        offset_y = self.y_layer(x)
+        nb_x = self.nb_x_layer(x)
+        nb_y = self.nb_y_layer(x)
+        x = self.my_maxpool(x)
+        cls2 = self.cls_layer(x)
+        x = self.my_maxpool(x)
+        cls3 = self.cls_layer(x)
+        return cls1, cls2, cls3, offset_x, offset_y, nb_x, nb_y
+
+if __name__ == '__main__':
+    pass
+
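Note (illustrative, not part of this commit): the GSSL variant above differs from networks.py in that forward() max-pools the same stride-32 feature map twice and reuses cls_layer on it, yielding score maps at three resolutions for the semi-supervised training. A quick shape check, assuming this repo's import layout:

    import torch
    import torchvision.models as models
    from third_party.PIPNet.lib.networks_gssl import Pip_resnet18

    net = Pip_resnet18(models.resnet18(), num_nb=10, num_lms=68, input_size=256, net_stride=32)
    cls1, cls2, cls3, off_x, off_y, nb_x, nb_y = net(torch.randn(1, 3, 256, 256))
    print(cls1.shape, cls2.shape, cls3.shape)  # (1, 68, 8, 8), (1, 68, 4, 4), (1, 68, 2, 2)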
third_party/PIPNet/lib/preprocess.py ADDED
@@ -0,0 +1,554 @@
+import os, cv2
+import hdf5storage
+import numpy as np
+import sys
+
+def process_300w(root_folder, folder_name, image_name, label_name, target_size):
+    image_path = os.path.join(root_folder, folder_name, image_name)
+    label_path = os.path.join(root_folder, folder_name, label_name)
+
+    with open(label_path, 'r') as ff:
+        anno = ff.readlines()[3:-1]
+        anno = [x.strip().split() for x in anno]
+        anno = [[int(float(x[0])), int(float(x[1]))] for x in anno]
+        image = cv2.imread(image_path)
+        image_height, image_width, _ = image.shape
+        anno_x = [x[0] for x in anno]
+        anno_y = [x[1] for x in anno]
+        bbox_xmin = min(anno_x)
+        bbox_ymin = min(anno_y)
+        bbox_xmax = max(anno_x)
+        bbox_ymax = max(anno_y)
+        bbox_width = bbox_xmax - bbox_xmin
+        bbox_height = bbox_ymax - bbox_ymin
+        scale = 1.1
+        bbox_xmin -= int((scale-1)/2*bbox_width)
+        bbox_ymin -= int((scale-1)/2*bbox_height)
+        bbox_width *= scale
+        bbox_height *= scale
+        bbox_width = int(bbox_width)
+        bbox_height = int(bbox_height)
+        bbox_xmin = max(bbox_xmin, 0)
+        bbox_ymin = max(bbox_ymin, 0)
+        bbox_width = min(bbox_width, image_width-bbox_xmin-1)
+        bbox_height = min(bbox_height, image_height-bbox_ymin-1)
+        anno = [[(x-bbox_xmin)/bbox_width, (y-bbox_ymin)/bbox_height] for x,y in anno]
+
+        bbox_xmax = bbox_xmin + bbox_width
+        bbox_ymax = bbox_ymin + bbox_height
+        image_crop = image[bbox_ymin:bbox_ymax, bbox_xmin:bbox_xmax, :]
+        image_crop = cv2.resize(image_crop, (target_size, target_size))
+        return image_crop, anno
+
+def process_cofw(image, bbox, anno, target_size):
+    image_height, image_width, _ = image.shape
+    anno_x = anno[:29]
+    anno_y = anno[29:58]
+    ################################
+    xmin, ymin, width, height = bbox
+    xmax = xmin + width -1
+    ymax = ymin + height -1
+    ################################
+    xmin = max(xmin, 0)
+    ymin = max(ymin, 0)
+    xmax = min(xmax, image_width-1)
+    ymax = min(ymax, image_height-1)
+    anno_x = (anno_x - xmin) / (xmax - xmin)
+    anno_y = (anno_y - ymin) / (ymax - ymin)
+    anno = np.concatenate([anno_x.reshape(-1,1), anno_y.reshape(-1,1)], axis=1)
+    anno = list(anno)
+    anno = [list(x) for x in anno]
+    image_crop = image[int(ymin):int(ymax), int(xmin):int(xmax), :]
+    image_crop = cv2.resize(image_crop, (target_size, target_size))
+    return image_crop, anno
+
+def process_wflw(anno, target_size):
+    image_name = anno[-1]
+    image_path = os.path.join('..', 'data', 'WFLW', 'WFLW_images', image_name)
+    image = cv2.imread(image_path)
+    image_height, image_width, _ = image.shape
+    lms = anno[:196]
+    lms = [float(x) for x in lms]
+    lms_x = lms[0::2]
+    lms_y = lms[1::2]
+    lms_x = [x if x >=0 else 0 for x in lms_x]
+    lms_x = [x if x <=image_width else image_width for x in lms_x]
+    lms_y = [y if y >=0 else 0 for y in lms_y]
+    lms_y = [y if y <=image_height else image_height for y in lms_y]
+    lms = [[x,y] for x,y in zip(lms_x, lms_y)]
+    lms = [x for z in lms for x in z]
+    bbox = anno[196:200]
+    bbox = [float(x) for x in bbox]
+    attrs = anno[200:206]
+    attrs = np.array([int(x) for x in attrs])
+    bbox_xmin, bbox_ymin, bbox_xmax, bbox_ymax = bbox
+
+    width = bbox_xmax - bbox_xmin
+    height = bbox_ymax - bbox_ymin
+    scale = 1.2
+    bbox_xmin -= width * (scale-1)/2
+    bbox_ymin -= height * (scale-1)/2
+    bbox_xmax += width * (scale-1)/2
+    bbox_ymax += height * (scale-1)/2
+    bbox_xmin = max(bbox_xmin, 0)
+    bbox_ymin = max(bbox_ymin, 0)
+    bbox_xmax = min(bbox_xmax, image_width-1)
+    bbox_ymax = min(bbox_ymax, image_height-1)
+    width = bbox_xmax - bbox_xmin
+    height = bbox_ymax - bbox_ymin
+    image_crop = image[int(bbox_ymin):int(bbox_ymax), int(bbox_xmin):int(bbox_xmax), :]
+    image_crop = cv2.resize(image_crop, (target_size, target_size))
+
+    tmp1 = [bbox_xmin, bbox_ymin]*98
+    tmp1 = np.array(tmp1)
+    tmp2 = [width, height]*98
+    tmp2 = np.array(tmp2)
+    lms = np.array(lms) - tmp1
+    lms = lms / tmp2
+    lms = lms.tolist()
+    lms = zip(lms[0::2], lms[1::2])
+    return image_crop, list(lms)
+
+def process_aflw(root_folder, image_name, bbox, anno, target_size):
+    image = cv2.imread(os.path.join(root_folder, 'AFLW', 'flickr', image_name))
+    image_height, image_width, _ = image.shape
+    anno_x = anno[:19]
+    anno_y = anno[19:]
+    anno_x = [x if x >=0 else 0 for x in anno_x]
+    anno_x = [x if x <=image_width else image_width for x in anno_x]
+    anno_y = [y if y >=0 else 0 for y in anno_y]
+    anno_y = [y if y <=image_height else image_height for y in anno_y]
+    anno_x_min = min(anno_x)
+    anno_x_max = max(anno_x)
+    anno_y_min = min(anno_y)
+    anno_y_max = max(anno_y)
+    xmin, xmax, ymin, ymax = bbox
+
+    xmin = max(xmin, 0)
+    ymin = max(ymin, 0)
+    xmax = min(xmax, image_width-1)
+    ymax = min(ymax, image_height-1)
+
+    image_crop = image[int(ymin):int(ymax), int(xmin):int(xmax), :]
+    image_crop = cv2.resize(image_crop, (target_size, target_size))
+
+    anno_x = (np.array(anno_x) - xmin) / (xmax - xmin)
+    anno_y = (np.array(anno_y) - ymin) / (ymax - ymin)
+
+    anno = np.concatenate([anno_x.reshape(-1,1), anno_y.reshape(-1,1)], axis=1).flatten()
+    anno = zip(anno[0::2], anno[1::2])
+    return image_crop, anno
+
+def gen_meanface(root_folder, data_name):
+    with open(os.path.join(root_folder, data_name, 'train.txt'), 'r') as f:
+        annos = f.readlines()
+    annos = [x.strip().split()[1:] for x in annos]
+    annos = [[float(x) for x in anno] for anno in annos]
+    annos = np.array(annos)
+    meanface = np.mean(annos, axis=0)
+    meanface = meanface.tolist()
+    meanface = [str(x) for x in meanface]
+
+    with open(os.path.join(root_folder, data_name, 'meanface.txt'), 'w') as f:
+        f.write(' '.join(meanface))
+
+def convert_wflw(root_folder, data_name):
+    with open(os.path.join('../data/WFLW/test.txt'), 'r') as f:
+        annos = f.readlines()
+    annos = [x.strip().split() for x in annos]
+    annos_new = []
+    for anno in annos:
+        annos_new.append([])
+        # name
+        annos_new[-1].append(anno[0])
+        anno = anno[1:]
+        # jaw
+        for i in range(17):
+            annos_new[-1].append(anno[i*2*2])
+            annos_new[-1].append(anno[i*2*2+1])
+        # left eyebrow
+        annos_new[-1].append(anno[33*2])
+        annos_new[-1].append(anno[33*2+1])
+        annos_new[-1].append(anno[34*2])
+        annos_new[-1].append(str((float(anno[34*2+1])+float(anno[41*2+1]))/2))
+        annos_new[-1].append(anno[35*2])
+        annos_new[-1].append(str((float(anno[35*2+1])+float(anno[40*2+1]))/2))
+        annos_new[-1].append(anno[36*2])
+        annos_new[-1].append(str((float(anno[36*2+1])+float(anno[39*2+1]))/2))
+        annos_new[-1].append(anno[37*2])
+        annos_new[-1].append(str((float(anno[37*2+1])+float(anno[38*2+1]))/2))
+        # right eyebrow
+        annos_new[-1].append(anno[42*2])
+        annos_new[-1].append(str((float(anno[42*2+1])+float(anno[50*2+1]))/2))
+        annos_new[-1].append(anno[43*2])
+        annos_new[-1].append(str((float(anno[43*2+1])+float(anno[49*2+1]))/2))
+        annos_new[-1].append(anno[44*2])
+        annos_new[-1].append(str((float(anno[44*2+1])+float(anno[48*2+1]))/2))
+        annos_new[-1].append(anno[45*2])
+        annos_new[-1].append(str((float(anno[45*2+1])+float(anno[47*2+1]))/2))
+        annos_new[-1].append(anno[46*2])
+        annos_new[-1].append(anno[46*2+1])
+        # nose
+        for i in range(51, 60):
+            annos_new[-1].append(anno[i*2])
+            annos_new[-1].append(anno[i*2+1])
+        # left eye
+        annos_new[-1].append(anno[60*2])
+        annos_new[-1].append(anno[60*2+1])
+        annos_new[-1].append(str(0.666*float(anno[61*2])+0.333*float(anno[62*2])))
+        annos_new[-1].append(str(0.666*float(anno[61*2+1])+0.333*float(anno[62*2+1])))
+        annos_new[-1].append(str(0.666*float(anno[63*2])+0.333*float(anno[62*2])))
+        annos_new[-1].append(str(0.666*float(anno[63*2+1])+0.333*float(anno[62*2+1])))
+        annos_new[-1].append(anno[64*2])
+        annos_new[-1].append(anno[64*2+1])
+        annos_new[-1].append(str(0.666*float(anno[65*2])+0.333*float(anno[66*2])))
+        annos_new[-1].append(str(0.666*float(anno[65*2+1])+0.333*float(anno[66*2+1])))
+        annos_new[-1].append(str(0.666*float(anno[67*2])+0.333*float(anno[66*2])))
+        annos_new[-1].append(str(0.666*float(anno[67*2+1])+0.333*float(anno[66*2+1])))
+        # right eye
+        annos_new[-1].append(anno[68*2])
+        annos_new[-1].append(anno[68*2+1])
+        annos_new[-1].append(str(0.666*float(anno[69*2])+0.333*float(anno[70*2])))
+        annos_new[-1].append(str(0.666*float(anno[69*2+1])+0.333*float(anno[70*2+1])))
+        annos_new[-1].append(str(0.666*float(anno[71*2])+0.333*float(anno[70*2])))
+        annos_new[-1].append(str(0.666*float(anno[71*2+1])+0.333*float(anno[70*2+1])))
+        annos_new[-1].append(anno[72*2])
+        annos_new[-1].append(anno[72*2+1])
+        annos_new[-1].append(str(0.666*float(anno[73*2])+0.333*float(anno[74*2])))
+        annos_new[-1].append(str(0.666*float(anno[73*2+1])+0.333*float(anno[74*2+1])))
+        annos_new[-1].append(str(0.666*float(anno[75*2])+0.333*float(anno[74*2])))
+        annos_new[-1].append(str(0.666*float(anno[75*2+1])+0.333*float(anno[74*2+1])))
+        # mouth
+        for i in range(76, 96):
+            annos_new[-1].append(anno[i*2])
+            annos_new[-1].append(anno[i*2+1])
+
+    with open(os.path.join(root_folder, data_name, 'test.txt'), 'w') as f:
+        for anno in annos_new:
+            f.write(' '.join(anno)+'\n')
+
+
+def gen_data(root_folder, data_name, target_size):
+    if not os.path.exists(os.path.join(root_folder, data_name, 'images_train')):
+        os.mkdir(os.path.join(root_folder, data_name, 'images_train'))
+    if not os.path.exists(os.path.join(root_folder, data_name, 'images_test')):
+        os.mkdir(os.path.join(root_folder, data_name, 'images_test'))
+
+    ################################################################################################################
+    if data_name == 'data_300W':
+        folders_train = ['afw', 'helen/trainset', 'lfpw/trainset']
+        annos_train = {}
+        for folder_train in folders_train:
+            all_files = sorted(os.listdir(os.path.join(root_folder, data_name, folder_train)))
+            image_files = [x for x in all_files if '.pts' not in x]
+            label_files = [x for x in all_files if '.pts' in x]
+            assert len(image_files) == len(label_files)
+            for image_name, label_name in zip(image_files, label_files):
+                print(image_name)
+                image_crop, anno = process_300w(os.path.join(root_folder, 'data_300W'), folder_train, image_name, label_name, target_size)
+                image_crop_name = folder_train.replace('/', '_')+'_'+image_name
+                cv2.imwrite(os.path.join(root_folder, data_name, 'images_train', image_crop_name), image_crop)
+                annos_train[image_crop_name] = anno
+        with open(os.path.join(root_folder, data_name, 'train.txt'), 'w') as f:
+            for image_crop_name, anno in annos_train.items():
+                f.write(image_crop_name+' ')
+                for x,y in anno:
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+
+
+        folders_test = ['helen/testset', 'lfpw/testset', 'ibug']
+        annos_test = {}
+        for folder_test in folders_test:
+            all_files = sorted(os.listdir(os.path.join(root_folder, data_name, folder_test)))
+            image_files = [x for x in all_files if '.pts' not in x]
+            label_files = [x for x in all_files if '.pts' in x]
+            assert len(image_files) == len(label_files)
+            for image_name, label_name in zip(image_files, label_files):
+                print(image_name)
+                image_crop, anno = process_300w(os.path.join(root_folder, 'data_300W'), folder_test, image_name, label_name, target_size)
+                image_crop_name = folder_test.replace('/', '_')+'_'+image_name
+                cv2.imwrite(os.path.join(root_folder, data_name, 'images_test', image_crop_name), image_crop)
+                annos_test[image_crop_name] = anno
+        with open(os.path.join(root_folder, data_name, 'test.txt'), 'w') as f:
+            for image_crop_name, anno in annos_test.items():
+                f.write(image_crop_name+' ')
+                for x,y in anno:
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+
+        annos = None
+        with open(os.path.join(root_folder, data_name, 'test.txt'), 'r') as f:
+            annos = f.readlines()
+        with open(os.path.join(root_folder, data_name, 'test_common.txt'), 'w') as f:
+            for anno in annos:
+                if not 'ibug' in anno:
+                    f.write(anno)
+        with open(os.path.join(root_folder, data_name, 'test_challenge.txt'), 'w') as f:
+            for anno in annos:
+                if 'ibug' in anno:
+                    f.write(anno)
+
+        gen_meanface(root_folder, data_name)
+    ################################################################################################################
+    elif data_name == 'COFW':
+        train_file = 'COFW_train_color.mat'
+        train_mat = hdf5storage.loadmat(os.path.join(root_folder, 'COFW', train_file))
+        images = train_mat['IsTr']
+        bboxes = train_mat['bboxesTr']
+        annos = train_mat['phisTr']
+
+        count = 1
+        with open(os.path.join(root_folder, 'COFW', 'train.txt'), 'w') as f:
+            for i in range(images.shape[0]):
+                image = images[i, 0]
+                # grayscale
+                if len(image.shape) == 2:
+                    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
+                # swap rgb channel to bgr
+                else:
+                    image = image[:,:,::-1]
+                bbox = bboxes[i, :]
+                anno = annos[i, :]
+                image_crop, anno = process_cofw(image, bbox, anno, target_size)
+                pad_num = 4-len(str(count))
+                image_crop_name = 'cofw_train_' + '0' * pad_num + str(count) + '.jpg'
+                print(image_crop_name)
+                cv2.imwrite(os.path.join(root_folder, 'COFW', 'images_train', image_crop_name), image_crop)
+                f.write(image_crop_name+' ')
+                for x,y in anno:
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+                count += 1
+
+        test_file = 'COFW_test_color.mat'
+        test_mat = hdf5storage.loadmat(os.path.join(root_folder, 'COFW', test_file))
+        images = test_mat['IsT']
+        bboxes = test_mat['bboxesT']
+        annos = test_mat['phisT']
+
+        count = 1
+        with open(os.path.join(root_folder, 'COFW', 'test.txt'), 'w') as f:
+            for i in range(images.shape[0]):
+                image = images[i, 0]
+                # grayscale
+                if len(image.shape) == 2:
+                    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
+                # swap rgb channel to bgr
+                else:
+                    image = image[:,:,::-1]
+                bbox = bboxes[i, :]
+                anno = annos[i, :]
+                image_crop, anno = process_cofw(image, bbox, anno, target_size)
+                pad_num = 4-len(str(count))
+                image_crop_name = 'cofw_test_' + '0' * pad_num + str(count) + '.jpg'
+                print(image_crop_name)
+                cv2.imwrite(os.path.join(root_folder, 'COFW', 'images_test', image_crop_name), image_crop)
+                f.write(image_crop_name+' ')
+                for x,y in anno:
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+                count += 1
+        gen_meanface(root_folder, data_name)
+    ################################################################################################################
+    elif data_name == 'WFLW':
+        train_file = 'list_98pt_rect_attr_train.txt'
+        with open(os.path.join(root_folder, 'WFLW', 'WFLW_annotations', 'list_98pt_rect_attr_train_test', train_file), 'r') as f:
+            annos_train = f.readlines()
+        annos_train = [x.strip().split() for x in annos_train]
+        count = 1
+        with open(os.path.join(root_folder, 'WFLW', 'train.txt'), 'w') as f:
+            for anno_train in annos_train:
+                image_crop, anno = process_wflw(anno_train, target_size)
+                pad_num = 4-len(str(count))
+                image_crop_name = 'wflw_train_' + '0' * pad_num + str(count) + '.jpg'
+                print(image_crop_name)
+                cv2.imwrite(os.path.join(root_folder, 'WFLW', 'images_train', image_crop_name), image_crop)
+                f.write(image_crop_name+' ')
+                for x,y in anno:
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+                count += 1
+
+        test_file = 'list_98pt_rect_attr_test.txt'
+        with open(os.path.join(root_folder, 'WFLW', 'WFLW_annotations', 'list_98pt_rect_attr_train_test', test_file), 'r') as f:
+            annos_test = f.readlines()
+        annos_test = [x.strip().split() for x in annos_test]
+        names_mapping = {}
+        count = 1
+        with open(os.path.join(root_folder, 'WFLW', 'test.txt'), 'w') as f:
+            for anno_test in annos_test:
+                image_crop, anno = process_wflw(anno_test, target_size)
+                pad_num = 4-len(str(count))
+                image_crop_name = 'wflw_test_' + '0' * pad_num + str(count) + '.jpg'
+                print(image_crop_name)
+                names_mapping[anno_test[0]+'_'+anno_test[-1]] = [image_crop_name, anno]
+                cv2.imwrite(os.path.join(root_folder, 'WFLW', 'images_test', image_crop_name), image_crop)
+                f.write(image_crop_name+' ')
+                for x,y in list(anno):
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+                count += 1
+
+        test_pose_file = 'list_98pt_test_largepose.txt'
+        with open(os.path.join(root_folder, 'WFLW', 'WFLW_annotations', 'list_98pt_test', test_pose_file), 'r') as f:
+            annos_pose_test = f.readlines()
+        names_pose = [x.strip().split() for x in annos_pose_test]
+        names_pose = [x[0]+'_'+x[-1] for x in names_pose]
+        with open(os.path.join(root_folder, 'WFLW', 'test_pose.txt'), 'w') as f:
+            for name_pose in names_pose:
+                if name_pose in names_mapping:
+                    image_crop_name, anno = names_mapping[name_pose]
+                    f.write(image_crop_name+' ')
+                    for x,y in anno:
+                        f.write(str(x)+' '+str(y)+' ')
+                    f.write('\n')
+                else:
+                    print('error!')
+                    exit(0)
+
+        test_expr_file = 'list_98pt_test_expression.txt'
+        with open(os.path.join(root_folder, 'WFLW', 'WFLW_annotations', 'list_98pt_test', test_expr_file), 'r') as f:
+            annos_expr_test = f.readlines()
+        names_expr = [x.strip().split() for x in annos_expr_test]
+        names_expr = [x[0]+'_'+x[-1] for x in names_expr]
+        with open(os.path.join(root_folder, 'WFLW', 'test_expr.txt'), 'w') as f:
+            for name_expr in names_expr:
+                if name_expr in names_mapping:
+                    image_crop_name, anno = names_mapping[name_expr]
+                    f.write(image_crop_name+' ')
+                    for x,y in anno:
+                        f.write(str(x)+' '+str(y)+' ')
+                    f.write('\n')
+                else:
+                    print('error!')
+                    exit(0)
+
+        test_illu_file = 'list_98pt_test_illumination.txt'
+        with open(os.path.join(root_folder, 'WFLW', 'WFLW_annotations', 'list_98pt_test', test_illu_file), 'r') as f:
+            annos_illu_test = f.readlines()
+        names_illu = [x.strip().split() for x in annos_illu_test]
+        names_illu = [x[0]+'_'+x[-1] for x in names_illu]
+        with open(os.path.join(root_folder, 'WFLW', 'test_illu.txt'), 'w') as f:
+            for name_illu in names_illu:
+                if name_illu in names_mapping:
+                    image_crop_name, anno = names_mapping[name_illu]
+                    f.write(image_crop_name+' ')
+                    for x,y in anno:
+                        f.write(str(x)+' '+str(y)+' ')
+                    f.write('\n')
+                else:
+                    print('error!')
+                    exit(0)
+
+        test_mu_file = 'list_98pt_test_makeup.txt'
+        with open(os.path.join(root_folder, 'WFLW', 'WFLW_annotations', 'list_98pt_test', test_mu_file), 'r') as f:
+            annos_mu_test = f.readlines()
+        names_mu = [x.strip().split() for x in annos_mu_test]
+        names_mu = [x[0]+'_'+x[-1] for x in names_mu]
+        with open(os.path.join(root_folder, 'WFLW', 'test_mu.txt'), 'w') as f:
+            for name_mu in names_mu:
+                if name_mu in names_mapping:
+                    image_crop_name, anno = names_mapping[name_mu]
+                    f.write(image_crop_name+' ')
+                    for x,y in anno:
+                        f.write(str(x)+' '+str(y)+' ')
+                    f.write('\n')
+                else:
+                    print('error!')
+                    exit(0)
+
+        test_occu_file = 'list_98pt_test_occlusion.txt'
+        with open(os.path.join(root_folder, 'WFLW', 'WFLW_annotations', 'list_98pt_test', test_occu_file), 'r') as f:
+            annos_occu_test = f.readlines()
+        names_occu = [x.strip().split() for x in annos_occu_test]
+        names_occu = [x[0]+'_'+x[-1] for x in names_occu]
+        with open(os.path.join(root_folder, 'WFLW', 'test_occu.txt'), 'w') as f:
+            for name_occu in names_occu:
+                if name_occu in names_mapping:
+                    image_crop_name, anno = names_mapping[name_occu]
+                    f.write(image_crop_name+' ')
+                    for x,y in anno:
+                        f.write(str(x)+' '+str(y)+' ')
+                    f.write('\n')
+                else:
+                    print('error!')
+                    exit(0)
+
+
+        test_blur_file = 'list_98pt_test_blur.txt'
+        with open(os.path.join(root_folder, 'WFLW', 'WFLW_annotations', 'list_98pt_test', test_blur_file), 'r') as f:
+            annos_blur_test = f.readlines()
+        names_blur = [x.strip().split() for x in annos_blur_test]
+        names_blur = [x[0]+'_'+x[-1] for x in names_blur]
+        with open(os.path.join(root_folder, 'WFLW', 'test_blur.txt'), 'w') as f:
+            for name_blur in names_blur:
+                if name_blur in names_mapping:
+                    image_crop_name, anno = names_mapping[name_blur]
+                    f.write(image_crop_name+' ')
+                    for x,y in anno:
+                        f.write(str(x)+' '+str(y)+' ')
+                    f.write('\n')
+                else:
+                    print('error!')
+                    exit(0)
+        gen_meanface(root_folder, data_name)
+    ################################################################################################################
+    elif data_name == 'AFLW':
+        mat = hdf5storage.loadmat('../data/AFLW/AFLWinfo_release.mat')
+        bboxes = mat['bbox']
+        annos = mat['data']
+        mask_new = mat['mask_new']
+        nameList = mat['nameList']
+        ra = mat['ra'][0]
+        train_indices = ra[:20000]
+        test_indices = ra[20000:]
+
+        with open(os.path.join(root_folder, 'AFLW', 'train.txt'), 'w') as f:
+            for index in train_indices:
+                # from matlab index
+                image_name = nameList[index-1][0][0]
+                bbox = bboxes[index-1]
+                anno = annos[index-1]
+                image_crop, anno = process_aflw(root_folder, image_name, bbox, anno, target_size)
+                pad_num = 5-len(str(index))
+                image_crop_name = 'aflw_train_' + '0' * pad_num + str(index) + '.jpg'
+                print(image_crop_name)
+                cv2.imwrite(os.path.join(root_folder, 'AFLW', 'images_train', image_crop_name), image_crop)
+                f.write(image_crop_name+' ')
+                for x,y in anno:
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+
+        with open(os.path.join(root_folder, 'AFLW', 'test.txt'), 'w') as f:
+            for index in test_indices:
+                # from matlab index
+                image_name = nameList[index-1][0][0]
+                bbox = bboxes[index-1]
+                anno = annos[index-1]
+                image_crop, anno = process_aflw(root_folder, image_name, bbox, anno, target_size)
+                pad_num = 5-len(str(index))
+                image_crop_name = 'aflw_test_' + '0' * pad_num + str(index) + '.jpg'
+                print(image_crop_name)
+                cv2.imwrite(os.path.join(root_folder, 'AFLW', 'images_test', image_crop_name), image_crop)
+                f.write(image_crop_name+' ')
+                for x,y in anno:
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+        gen_meanface(root_folder, data_name)
+    else:
+        print('Wrong data!')
+
+if __name__ == '__main__':
+    if len(sys.argv) < 2:
+        print('please input the data name.')
+        print('1. data_300W')
+        print('2. COFW')
+        print('3. WFLW')
+        print('4. AFLW')
+        exit(0)
+    else:
+        data_name = sys.argv[1]
+        gen_data('../data', data_name, 256)
+
+
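Note (illustrative, not part of this commit): gen_data() above writes one row per crop to train.txt/test.txt — an image name followed by the landmark (x, y) pairs normalized to [0, 1] inside the crop — and gen_meanface() averages those rows into meanface.txt. Reading a row back, assuming the script was run as `python preprocess.py WFLW` from inside lib/ as its __main__ block suggests:

    with open('../data/WFLW/train.txt') as f:
        name, *vals = f.readline().strip().split()
    coords = [(float(x), float(y)) for x, y in zip(vals[0::2], vals[1::2])]
    print(name, len(coords))  # e.g. wflw_train_0001.jpg 98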
third_party/PIPNet/lib/preprocess_gssl.py ADDED
@@ -0,0 +1,544 @@
+import os, cv2
+import hdf5storage
+import numpy as np
+import sys
+
+def process_300w(root_folder, folder_name, image_name, label_name, target_size):
+    image_path = os.path.join(root_folder, folder_name, image_name)
+    label_path = os.path.join(root_folder, folder_name, label_name)
+
+    with open(label_path, 'r') as ff:
+        anno = ff.readlines()[3:-1]
+        anno = [x.strip().split() for x in anno]
+        anno = [[int(float(x[0])), int(float(x[1]))] for x in anno]
+        image = cv2.imread(image_path)
+        image_height, image_width, _ = image.shape
+        anno_x = [x[0] for x in anno]
+        anno_y = [x[1] for x in anno]
+        bbox_xmin = min(anno_x)
+        bbox_ymin = min(anno_y)
+        bbox_xmax = max(anno_x)
+        bbox_ymax = max(anno_y)
+        bbox_width = bbox_xmax - bbox_xmin
+        bbox_height = bbox_ymax - bbox_ymin
+        scale = 1.3
+        bbox_xmin -= int((scale-1)/2*bbox_width)
+        bbox_ymin -= int((scale-1)/2*bbox_height)
+        bbox_width *= scale
+        bbox_height *= scale
+        bbox_width = int(bbox_width)
+        bbox_height = int(bbox_height)
+        bbox_xmin = max(bbox_xmin, 0)
+        bbox_ymin = max(bbox_ymin, 0)
+        bbox_width = min(bbox_width, image_width-bbox_xmin-1)
+        bbox_height = min(bbox_height, image_height-bbox_ymin-1)
+        anno = [[(x-bbox_xmin)/bbox_width, (y-bbox_ymin)/bbox_height] for x,y in anno]
+
+        bbox_xmax = bbox_xmin + bbox_width
+        bbox_ymax = bbox_ymin + bbox_height
+        image_crop = image[bbox_ymin:bbox_ymax, bbox_xmin:bbox_xmax, :]
+        image_crop = cv2.resize(image_crop, (target_size, target_size))
+        return image_crop, anno
+
+def process_wflw(anno, target_size):
+    image_name = anno[-1]
+    image_path = os.path.join('..', 'data', 'WFLW', 'WFLW_images', image_name)
+    image = cv2.imread(image_path)
+    image_height, image_width, _ = image.shape
+    lms = anno[:196]
+    lms = [float(x) for x in lms]
+    lms_x = lms[0::2]
+    lms_y = lms[1::2]
+    lms_x = [x if x >=0 else 0 for x in lms_x]
+    lms_x = [x if x <=image_width else image_width for x in lms_x]
+    lms_y = [y if y >=0 else 0 for y in lms_y]
+    lms_y = [y if y <=image_height else image_height for y in lms_y]
+    lms = [[x,y] for x,y in zip(lms_x, lms_y)]
+    lms = [x for z in lms for x in z]
+    bbox = anno[196:200]
+    bbox = [float(x) for x in bbox]
+    attrs = anno[200:206]
+    attrs = np.array([int(x) for x in attrs])
+    bbox_xmin, bbox_ymin, bbox_xmax, bbox_ymax = bbox
+
+    width = bbox_xmax - bbox_xmin
+    height = bbox_ymax - bbox_ymin
+    scale = 1.2
+    bbox_xmin -= width * (scale-1)/2
+    # remove a part of top area for alignment, see details in paper
+    bbox_ymin += height * (scale-1)/2
+    bbox_xmax += width * (scale-1)/2
+    bbox_ymax += height * (scale-1)/2
+    bbox_xmin = max(bbox_xmin, 0)
+    bbox_ymin = max(bbox_ymin, 0)
+    bbox_xmax = min(bbox_xmax, image_width-1)
+    bbox_ymax = min(bbox_ymax, image_height-1)
+    width = bbox_xmax - bbox_xmin
+    height = bbox_ymax - bbox_ymin
+    image_crop = image[int(bbox_ymin):int(bbox_ymax), int(bbox_xmin):int(bbox_xmax), :]
+    image_crop = cv2.resize(image_crop, (target_size, target_size))
+
+    tmp1 = [bbox_xmin, bbox_ymin]*98
+    tmp1 = np.array(tmp1)
+    tmp2 = [width, height]*98
+    tmp2 = np.array(tmp2)
+    lms = np.array(lms) - tmp1
+    lms = lms / tmp2
+    lms = lms.tolist()
+    lms = zip(lms[0::2], lms[1::2])
+    return image_crop, list(lms)
+
+def process_celeba(root_folder, image_name, bbox, target_size):
+    image = cv2.imread(os.path.join(root_folder, 'CELEBA', 'img_celeba', image_name))
+    image_height, image_width, _ = image.shape
+    xmin, ymin, xmax, ymax = bbox
+    width = xmax - xmin + 1
+    height = ymax - ymin + 1
+    scale = 1.2
+    xmin -= width * (scale-1)/2
+    # remove a part of top area for alignment, see details in paper
+    ymin += height * (scale+0.1-1)/2
+    xmax += width * (scale-1)/2
+    ymax += height * (scale-1)/2
+    xmin = max(xmin, 0)
+    ymin = max(ymin, 0)
+    xmax = min(xmax, image_width-1)
+    ymax = min(ymax, image_height-1)
+    image_crop = image[int(ymin):int(ymax), int(xmin):int(xmax), :]
+    image_crop = cv2.resize(image_crop, (target_size, target_size))
+    return image_crop
+
+def process_cofw_68_train(image, bbox, anno, target_size):
+    image_height, image_width, _ = image.shape
+    anno_x = anno[:29]
+    anno_y = anno[29:58]
+    xmin, ymin, width, height = bbox
+    xmax = xmin + width -1
+    ymax = ymin + height -1
+    scale = 1.3
+    xmin -= width * (scale-1)/2
+    ymin -= height * (scale-1)/2
+    xmax += width * (scale-1)/2
+    ymax += height * (scale-1)/2
+    xmin = max(xmin, 0)
+    ymin = max(ymin, 0)
+    xmax = min(xmax, image_width-1)
+    ymax = min(ymax, image_height-1)
+    anno_x = (anno_x - xmin) / (xmax - xmin)
+    anno_y = (anno_y - ymin) / (ymax - ymin)
+    anno = np.concatenate([anno_x.reshape(-1,1), anno_y.reshape(-1,1)], axis=1)
+    anno = list(anno)
+    anno = [list(x) for x in anno]
+    image_crop = image[int(ymin):int(ymax), int(xmin):int(xmax), :]
+    image_crop = cv2.resize(image_crop, (target_size, target_size))
+    return image_crop, anno
+
+def process_cofw_68_test(image, bbox, anno, target_size):
+    image_height, image_width, _ = image.shape
+    anno_x = anno[:,0].flatten()
+    anno_y = anno[:,1].flatten()
+
+    xmin, ymin, width, height = bbox
+    xmax = xmin + width -1
+    ymax = ymin + height -1
+
+    scale = 1.3
+    xmin -= width * (scale-1)/2
+    ymin -= height * (scale-1)/2
+    xmax += width * (scale-1)/2
+    ymax += height * (scale-1)/2
+    xmin = max(xmin, 0)
+    ymin = max(ymin, 0)
+    xmax = min(xmax, image_width-1)
+    ymax = min(ymax, image_height-1)
+    anno_x = (anno_x - xmin) / (xmax - xmin)
+    anno_y = (anno_y - ymin) / (ymax - ymin)
+    anno = np.concatenate([anno_x.reshape(-1,1), anno_y.reshape(-1,1)], axis=1)
+    anno = list(anno)
+    anno = [list(x) for x in anno]
+    image_crop = image[int(ymin):int(ymax), int(xmin):int(xmax), :]
+    image_crop = cv2.resize(image_crop, (target_size, target_size))
+    return image_crop, anno
+
+def gen_meanface(root_folder, data_name):
+    with open(os.path.join(root_folder, data_name, 'train_300W.txt'), 'r') as f:
+        annos = f.readlines()
+    annos = [x.strip().split()[1:] for x in annos]
+    annos = [[float(x) for x in anno] for anno in annos]
+    annos = np.array(annos)
+    meanface = np.mean(annos, axis=0)
+    meanface = meanface.tolist()
+    meanface = [str(x) for x in meanface]
+
+    with open(os.path.join(root_folder, data_name, 'meanface.txt'), 'w') as f:
+        f.write(' '.join(meanface))
+
+def convert_wflw(root_folder, data_name):
+    with open(os.path.join(root_folder, data_name, 'test_WFLW_98.txt'), 'r') as f:
+        annos = f.readlines()
+    annos = [x.strip().split() for x in annos]
+    annos_new = []
+    for anno in annos:
+        annos_new.append([])
+        # name
+        annos_new[-1].append(anno[0])
+        anno = anno[1:]
+        # jaw
+        for i in range(17):
+            annos_new[-1].append(anno[i*2*2])
+            annos_new[-1].append(anno[i*2*2+1])
+        # left eyebrow
+        annos_new[-1].append(anno[33*2])
+        annos_new[-1].append(anno[33*2+1])
+        annos_new[-1].append(anno[34*2])
+        annos_new[-1].append(str((float(anno[34*2+1])+float(anno[41*2+1]))/2))
+        annos_new[-1].append(anno[35*2])
+        annos_new[-1].append(str((float(anno[35*2+1])+float(anno[40*2+1]))/2))
+        annos_new[-1].append(anno[36*2])
+        annos_new[-1].append(str((float(anno[36*2+1])+float(anno[39*2+1]))/2))
+        annos_new[-1].append(anno[37*2])
+        annos_new[-1].append(str((float(anno[37*2+1])+float(anno[38*2+1]))/2))
+        # right eyebrow
+        annos_new[-1].append(anno[42*2])
+        annos_new[-1].append(str((float(anno[42*2+1])+float(anno[50*2+1]))/2))
+        annos_new[-1].append(anno[43*2])
+        annos_new[-1].append(str((float(anno[43*2+1])+float(anno[49*2+1]))/2))
+        annos_new[-1].append(anno[44*2])
+        annos_new[-1].append(str((float(anno[44*2+1])+float(anno[48*2+1]))/2))
+        annos_new[-1].append(anno[45*2])
+        annos_new[-1].append(str((float(anno[45*2+1])+float(anno[47*2+1]))/2))
+        annos_new[-1].append(anno[46*2])
+        annos_new[-1].append(anno[46*2+1])
+        # nose
+        for i in range(51, 60):
+            annos_new[-1].append(anno[i*2])
+            annos_new[-1].append(anno[i*2+1])
+        # left eye
+        annos_new[-1].append(anno[60*2])
+        annos_new[-1].append(anno[60*2+1])
+        annos_new[-1].append(str(0.666*float(anno[61*2])+0.333*float(anno[62*2])))
+        annos_new[-1].append(str(0.666*float(anno[61*2+1])+0.333*float(anno[62*2+1])))
+        annos_new[-1].append(str(0.666*float(anno[63*2])+0.333*float(anno[62*2])))
+        annos_new[-1].append(str(0.666*float(anno[63*2+1])+0.333*float(anno[62*2+1])))
+        annos_new[-1].append(anno[64*2])
+        annos_new[-1].append(anno[64*2+1])
+        annos_new[-1].append(str(0.666*float(anno[65*2])+0.333*float(anno[66*2])))
+        annos_new[-1].append(str(0.666*float(anno[65*2+1])+0.333*float(anno[66*2+1])))
+        annos_new[-1].append(str(0.666*float(anno[67*2])+0.333*float(anno[66*2])))
+        annos_new[-1].append(str(0.666*float(anno[67*2+1])+0.333*float(anno[66*2+1])))
+        # right eye
+        annos_new[-1].append(anno[68*2])
+        annos_new[-1].append(anno[68*2+1])
+        annos_new[-1].append(str(0.666*float(anno[69*2])+0.333*float(anno[70*2])))
+        annos_new[-1].append(str(0.666*float(anno[69*2+1])+0.333*float(anno[70*2+1])))
+        annos_new[-1].append(str(0.666*float(anno[71*2])+0.333*float(anno[70*2])))
+        annos_new[-1].append(str(0.666*float(anno[71*2+1])+0.333*float(anno[70*2+1])))
+        annos_new[-1].append(anno[72*2])
+        annos_new[-1].append(anno[72*2+1])
+        annos_new[-1].append(str(0.666*float(anno[73*2])+0.333*float(anno[74*2])))
+        annos_new[-1].append(str(0.666*float(anno[73*2+1])+0.333*float(anno[74*2+1])))
+        annos_new[-1].append(str(0.666*float(anno[75*2])+0.333*float(anno[74*2])))
+        annos_new[-1].append(str(0.666*float(anno[75*2+1])+0.333*float(anno[74*2+1])))
+        # mouth
+        for i in range(76, 96):
+            annos_new[-1].append(anno[i*2])
+            annos_new[-1].append(anno[i*2+1])
+
+    with open(os.path.join(root_folder, data_name, 'test_WFLW.txt'), 'w') as f:
+        for anno in annos_new:
+            f.write(' '.join(anno)+'\n')
+
+def gen_data(root_folder, data_name, target_size):
+    if not os.path.exists(os.path.join(root_folder, data_name, 'images_train')):
+        os.mkdir(os.path.join(root_folder, data_name, 'images_train'))
+    if not os.path.exists(os.path.join(root_folder, data_name, 'images_test')):
+        os.mkdir(os.path.join(root_folder, data_name, 'images_test'))
+    ################################################################################################################
+    if data_name == 'CELEBA':
+        os.system('rmdir ../data/CELEBA/images_test')
+        with open(os.path.join(root_folder, data_name, 'celeba_bboxes.txt'), 'r') as f:
+            bboxes = f.readlines()
+
+        bboxes = [x.strip().split() for x in bboxes]
+        with open(os.path.join(root_folder, data_name, 'train.txt'), 'w') as f:
+            for bbox in bboxes:
+                image_name = bbox[0]
+                print(image_name)
+                f.write(image_name+'\n')
+                bbox = bbox[1:]
+                bbox = [int(x) for x in bbox]
+                image_crop = process_celeba(root_folder, image_name, bbox, target_size)
+                cv2.imwrite(os.path.join(root_folder, data_name, 'images_train', image_name), image_crop)
+    ################################################################################################################
+    elif data_name == 'data_300W_CELEBA':
+        os.system('cp -r ../data/CELEBA/images_train ../data/data_300W_CELEBA/.')
+        os.system('cp ../data/CELEBA/train.txt ../data/data_300W_CELEBA/train_CELEBA.txt')
+
+        os.system('rmdir ../data/data_300W_CELEBA/images_test')
+        if not os.path.exists(os.path.join(root_folder, data_name, 'images_test_300W')):
+            os.mkdir(os.path.join(root_folder, data_name, 'images_test_300W'))
+        if not os.path.exists(os.path.join(root_folder, data_name, 'images_test_COFW')):
+            os.mkdir(os.path.join(root_folder, data_name, 'images_test_COFW'))
+        if not os.path.exists(os.path.join(root_folder, data_name, 'images_test_WFLW')):
+            os.mkdir(os.path.join(root_folder, data_name, 'images_test_WFLW'))
+
+        # train for data_300W
+        folders_train = ['afw', 'helen/trainset', 'lfpw/trainset']
+        annos_train = {}
+        for folder_train in folders_train:
+            all_files = sorted(os.listdir(os.path.join(root_folder, 'data_300W', folder_train)))
+            image_files = [x for x in all_files if '.pts' not in x]
+            label_files = [x for x in all_files if '.pts' in x]
+            assert len(image_files) == len(label_files)
+            for image_name, label_name in zip(image_files, label_files):
+                print(image_name)
+                image_crop, anno = process_300w(os.path.join(root_folder, 'data_300W'), folder_train, image_name, label_name, target_size)
+                image_crop_name = folder_train.replace('/', '_')+'_'+image_name
+                cv2.imwrite(os.path.join(root_folder, data_name, 'images_train', image_crop_name), image_crop)
+                annos_train[image_crop_name] = anno
+        with open(os.path.join(root_folder, data_name, 'train_300W.txt'), 'w') as f:
+            for image_crop_name, anno in annos_train.items():
+                f.write(image_crop_name+' ')
+                for x,y in anno:
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+
+        # test for data_300W
+        folders_test = ['helen/testset', 'lfpw/testset', 'ibug']
+        annos_test = {}
+        for folder_test in folders_test:
+            all_files = sorted(os.listdir(os.path.join(root_folder, 'data_300W', folder_test)))
+            image_files = [x for x in all_files if '.pts' not in x]
+            label_files = [x for x in all_files if '.pts' in x]
+            assert len(image_files) == len(label_files)
+            for image_name, label_name in zip(image_files, label_files):
+                print(image_name)
+                image_crop, anno = process_300w(os.path.join(root_folder, 'data_300W'), folder_test, image_name, label_name, target_size)
+                image_crop_name = folder_test.replace('/', '_')+'_'+image_name
+                cv2.imwrite(os.path.join(root_folder, data_name, 'images_test_300W', image_crop_name), image_crop)
+                annos_test[image_crop_name] = anno
+        with open(os.path.join(root_folder, data_name, 'test_300W.txt'), 'w') as f:
+            for image_crop_name, anno in annos_test.items():
+                f.write(image_crop_name+' ')
+                for x,y in anno:
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+
+        # test for COFW_68
+        test_mat = hdf5storage.loadmat(os.path.join('../data/COFW', 'COFW_test_color.mat'))
+        images = test_mat['IsT']
+
+        bboxes_mat = hdf5storage.loadmat(os.path.join('../data/data_300W_CELEBA', 'cofw68_test_bboxes.mat'))
+        bboxes = bboxes_mat['bboxes']
+        image_num = images.shape[0]
+        with open('../data/data_300W_CELEBA/test_COFW.txt', 'w') as f:
+            for i in range(image_num):
+                image = images[i,0]
+                # grayscale
+                if len(image.shape) == 2:
+                    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
+                # swap rgb channel to bgr
+                else:
+                    image = image[:,:,::-1]
+
+                bbox = bboxes[i,:]
+                anno_mat = hdf5storage.loadmat(os.path.join('../data/data_300W_CELEBA/cofw68_test_annotations', str(i+1)+'_points.mat'))
+                anno = anno_mat['Points']
+                image_crop, anno = process_cofw_68_test(image, bbox, anno, target_size)
+                pad_num = 4-len(str(i+1))
+                image_crop_name = 'cofw_test_' + '0' * pad_num + str(i+1) + '.jpg'
+                cv2.imwrite(os.path.join('../data/data_300W_CELEBA/images_test_COFW', image_crop_name), image_crop)
+                f.write(image_crop_name+' ')
+                for x,y in anno:
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+
+        # test for WFLW_68
+        test_file = 'list_98pt_rect_attr_test.txt'
+        with open(os.path.join(root_folder, 'WFLW', 'WFLW_annotations', 'list_98pt_rect_attr_train_test', test_file), 'r') as f:
+            annos_test = f.readlines()
+        annos_test = [x.strip().split() for x in annos_test]
+        names_mapping = {}
+        count = 1
+        with open(os.path.join(root_folder, 'data_300W_CELEBA', 'test_WFLW_98.txt'), 'w') as f:
+            for anno_test in annos_test:
+                image_crop, anno = process_wflw(anno_test, target_size)
+                pad_num = 4-len(str(count))
+                image_crop_name = 'wflw_test_' + '0' * pad_num + str(count) + '.jpg'
+                print(image_crop_name)
+                names_mapping[anno_test[0]+'_'+anno_test[-1]] = [image_crop_name, anno]
+                cv2.imwrite(os.path.join(root_folder, data_name, 'images_test_WFLW', image_crop_name), image_crop)
+                f.write(image_crop_name+' ')
+                for x,y in list(anno):
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+                count += 1
+
+        convert_wflw(root_folder, data_name)
+
+        gen_meanface(root_folder, data_name)
+    ################################################################################################################
+    elif data_name == 'data_300W_COFW_WFLW':
+
+        os.system('rmdir ../data/data_300W_COFW_WFLW/images_test')
+        if not os.path.exists(os.path.join(root_folder, data_name, 'images_test_300W')):
+            os.mkdir(os.path.join(root_folder, data_name, 'images_test_300W'))
+        if not os.path.exists(os.path.join(root_folder, data_name, 'images_test_COFW')):
+            os.mkdir(os.path.join(root_folder, data_name, 'images_test_COFW'))
+        if not os.path.exists(os.path.join(root_folder, data_name, 'images_test_WFLW')):
+            os.mkdir(os.path.join(root_folder, data_name, 'images_test_WFLW'))
+
+        # train for data_300W
+        folders_train = ['afw', 'helen/trainset', 'lfpw/trainset']
+        annos_train = {}
+        for folder_train in folders_train:
+            all_files = sorted(os.listdir(os.path.join(root_folder, 'data_300W', folder_train)))
+            image_files = [x for x in all_files if '.pts' not in x]
+            label_files = [x for x in all_files if '.pts' in x]
+            assert len(image_files) == len(label_files)
+            for image_name, label_name in zip(image_files, label_files):
+                print(image_name)
+                image_crop, anno = process_300w(os.path.join(root_folder, 'data_300W'), folder_train, image_name, label_name, target_size)
+                image_crop_name = folder_train.replace('/', '_')+'_'+image_name
+                cv2.imwrite(os.path.join(root_folder, data_name, 'images_train', image_crop_name), image_crop)
+                annos_train[image_crop_name] = anno
+        with open(os.path.join(root_folder, data_name, 'train_300W.txt'), 'w') as f:
+            for image_crop_name, anno in annos_train.items():
+                f.write(image_crop_name+' ')
+                for x,y in anno:
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+
+        # test for data_300W
+        folders_test = ['helen/testset', 'lfpw/testset', 'ibug']
+        annos_test = {}
+        for folder_test in folders_test:
+            all_files = sorted(os.listdir(os.path.join(root_folder, 'data_300W', folder_test)))
+            image_files = [x for x in all_files if '.pts' not in x]
+            label_files = [x for x in all_files if '.pts' in x]
+            assert len(image_files) == len(label_files)
+            for image_name, label_name in zip(image_files, label_files):
+                print(image_name)
+                image_crop, anno = process_300w(os.path.join(root_folder, 'data_300W'), folder_test, image_name, label_name, target_size)
+                image_crop_name = folder_test.replace('/', '_')+'_'+image_name
+                cv2.imwrite(os.path.join(root_folder, data_name, 'images_test_300W', image_crop_name), image_crop)
+                annos_test[image_crop_name] = anno
+        with open(os.path.join(root_folder, data_name, 'test_300W.txt'), 'w') as f:
+            for image_crop_name, anno in annos_test.items():
+                f.write(image_crop_name+' ')
+                for x,y in anno:
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+
+        # train for COFW_68
+        ###################
+        train_file = 'COFW_train_color.mat'
+        train_mat = hdf5storage.loadmat(os.path.join(root_folder, 'COFW', train_file))
+        images = train_mat['IsTr']
+        bboxes = train_mat['bboxesTr']
+        annos = train_mat['phisTr']
+
+        count = 1
+        with open('../data/data_300W_COFW_WFLW/train_COFW.txt', 'w') as f:
+            for i in range(images.shape[0]):
+                image = images[i, 0]
+                # grayscale
+                if len(image.shape) == 2:
+                    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
+                # swap rgb channel to bgr
+                else:
+                    image = image[:,:,::-1]
+                bbox = bboxes[i, :]
+                anno = annos[i, :]
+                image_crop, anno = process_cofw_68_train(image, bbox, anno, target_size)
+                pad_num = 4-len(str(count))
+                image_crop_name = 'cofw_train_' + '0' * pad_num + str(count) + '.jpg'
+                f.write(image_crop_name+'\n')
+                cv2.imwrite(os.path.join(root_folder, 'data_300W_COFW_WFLW', 'images_train', image_crop_name), image_crop)
+                count += 1
+        ###################
+
+        # test for COFW_68
+        test_mat = hdf5storage.loadmat(os.path.join('../data/COFW', 'COFW_test_color.mat'))
+        images = test_mat['IsT']
+
+        bboxes_mat = hdf5storage.loadmat(os.path.join('../data/data_300W_COFW_WFLW', 'cofw68_test_bboxes.mat'))
+        bboxes = bboxes_mat['bboxes']
+        image_num = images.shape[0]
+        with open('../data/data_300W_COFW_WFLW/test_COFW.txt', 'w') as f:
+            for i in range(image_num):
+                image = images[i,0]
+                # grayscale
+                if len(image.shape) == 2:
+                    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
+                # swap rgb channel to bgr
+                else:
+                    image = image[:,:,::-1]
+
+                bbox = bboxes[i,:]
+                anno_mat = hdf5storage.loadmat(os.path.join('../data/data_300W_COFW_WFLW/cofw68_test_annotations', str(i+1)+'_points.mat'))
+                anno = anno_mat['Points']
+                image_crop, anno = process_cofw_68_test(image, bbox, anno, target_size)
+                pad_num = 4-len(str(i+1))
+                image_crop_name = 'cofw_test_' + '0' * pad_num + str(i+1) + '.jpg'
+                cv2.imwrite(os.path.join('../data/data_300W_COFW_WFLW/images_test_COFW', image_crop_name), image_crop)
+                f.write(image_crop_name+' ')
+                for x,y in anno:
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+
+        # train for WFLW_68
+        train_file = 'list_98pt_rect_attr_train.txt'
+        with open(os.path.join('../data', 'WFLW', 'WFLW_annotations', 'list_98pt_rect_attr_train_test', train_file), 'r') as f:
+            annos_train = f.readlines()
+        annos_train = [x.strip().split() for x in annos_train]
+        count = 1
+        with open('../data/data_300W_COFW_WFLW/train_WFLW.txt', 'w') as f:
+            for anno_train in annos_train:
+                image_crop, anno = process_wflw(anno_train, target_size)
+                pad_num = 4-len(str(count))
+                image_crop_name = 'wflw_train_' + '0' * pad_num + str(count) + '.jpg'
+                print(image_crop_name)
+                f.write(image_crop_name+'\n')
+                cv2.imwrite(os.path.join(root_folder, 'data_300W_COFW_WFLW', 'images_train', image_crop_name), image_crop)
+                count += 1
+
+        # test for WFLW_68
+        test_file = 'list_98pt_rect_attr_test.txt'
+        with open(os.path.join(root_folder, 'WFLW', 'WFLW_annotations', 'list_98pt_rect_attr_train_test', test_file), 'r') as f:
+            annos_test = f.readlines()
+        annos_test = [x.strip().split() for x in annos_test]
+        names_mapping = {}
+        count = 1
+        with open(os.path.join(root_folder, 'data_300W_COFW_WFLW', 'test_WFLW_98.txt'), 'w') as f:
+            for anno_test in annos_test:
+                image_crop, anno = process_wflw(anno_test, target_size)
+                pad_num = 4-len(str(count))
+                image_crop_name = 'wflw_test_' + '0' * pad_num + str(count) + '.jpg'
+                print(image_crop_name)
+                names_mapping[anno_test[0]+'_'+anno_test[-1]] = [image_crop_name, anno]
+                cv2.imwrite(os.path.join(root_folder, data_name, 'images_test_WFLW', image_crop_name), image_crop)
+                f.write(image_crop_name+' ')
+                for x,y in list(anno):
+                    f.write(str(x)+' '+str(y)+' ')
+                f.write('\n')
+                count += 1
+
+        convert_wflw(root_folder, data_name)
+
+        gen_meanface(root_folder, data_name)
+    else:
+        print('Wrong data!')
+
+if __name__ == '__main__':
+    if len(sys.argv) < 2:
+        print('please input the data name.')
+        print('1. CELEBA')
+        print('2. data_300W_CELEBA')
+        print('3. data_300W_COFW_WFLW')
+        exit(0)
+    else:
+        data_name = sys.argv[1]
+        gen_data('../data', data_name, 256)
+
+
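Note (illustrative, not part of this commit): in the GSSL splits generated above, train_300W.txt rows carry an image name plus 68 normalized (x, y) pairs, while train_CELEBA.txt, train_COFW.txt and train_WFLW.txt carry image names only (unlabeled data). A hypothetical loader therefore has to branch on row length:

    def parse_row(line):
        parts = line.strip().split()
        if len(parts) == 1:        # unlabeled row (CELEBA / COFW / WFLW images)
            return parts[0], None
        vals = [float(v) for v in parts[1:]]
        return parts[0], list(zip(vals[0::2], vals[1::2]))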
third_party/PIPNet/lib/tools.py ADDED
@@ -0,0 +1,174 @@
+ # os, numpy and PIL were previously pulled in implicitly via the star imports below
+ import os
+ import cv2
+ import sys
+ import numpy as np
+
+ from math import floor
+ from PIL import Image
+ from third_party.PIPNet.FaceBoxesV2.faceboxes_detector import *
+
+ import torch
+ import torch.nn.parallel
+ import torch.utils.data
+ import torchvision.transforms as transforms
+ import torchvision.models as models
+
+ from third_party.PIPNet.lib.networks import *
+ from third_party.PIPNet.lib.functions import *
+ from third_party.PIPNet.reverse_index import ri1, ri2
+
+
+ make_abs_path = lambda fn: os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), fn))
+
+
+ class Config:
+     def __init__(self):
+         self.det_head = "pip"
+         self.net_stride = 32
+         self.batch_size = 16
+         self.init_lr = 0.0001
+         self.num_epochs = 60
+         self.decay_steps = [30, 50]
+         self.input_size = 256
+         self.backbone = "resnet101"
+         self.pretrained = True
+         self.criterion_cls = "l2"
+         self.criterion_reg = "l1"
+         self.cls_loss_weight = 10
+         self.reg_loss_weight = 1
+         self.num_lms = 98
+         self.save_interval = self.num_epochs
+         self.num_nb = 10
+         self.use_gpu = True
+         self.gpu_id = 3
+
+
+ def get_lmk_model():
+     cfg = Config()
+
+     resnet101 = models.resnet101(pretrained=cfg.pretrained)
+     net = Pip_resnet101(
+         resnet101,
+         cfg.num_nb,
+         num_lms=cfg.num_lms,
+         input_size=cfg.input_size,
+         net_stride=cfg.net_stride,
+     )
+
+     # gpu or cpu mode: fall back to CPU when CUDA is unavailable
+     if cfg.use_gpu:
+         device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+     else:
+         device = torch.device("cpu")
+     net = net.to(device)
+
+     weight_file = make_abs_path('../../../weights/PIPNet/epoch59.pth')
+     state_dict = torch.load(weight_file, map_location=device)
+     net.load_state_dict(state_dict)
+
+     detector = FaceBoxesDetector(
+         "FaceBoxes",
+         make_abs_path("./../../weights/PIPNet/FaceBoxesV2.pth"),
+         use_gpu=torch.cuda.is_available(),
+         device=device,
+     )
+     return net, detector
+
+
+ def demo_image(
+     image_file,
+     net,
+     detector,
+     input_size=256,
+     net_stride=32,
+     num_nb=10,
+     use_gpu=True,
+     device="cuda:0",
+ ):
+     my_thresh = 0.6
+     det_box_scale = 1.2
+     net.eval()
+     preprocess = transforms.Compose(
+         [
+             transforms.Resize((256, 256)),
+             transforms.ToTensor(),
+             transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+         ]
+     )
+     reverse_index1, reverse_index2, max_len = ri1, ri2, 17
+     # the caller passes a decoded BGR image array, not a file path
+     # image = cv2.imread(image_file)
+     image = image_file
+     image_height, image_width, _ = image.shape
+     detections, _ = detector.detect(image, my_thresh, 1)
+     lmks = []
+     for i in range(len(detections)):
+         det_xmin = detections[i][2]
+         det_ymin = detections[i][3]
+         det_width = detections[i][4]
+         det_height = detections[i][5]
+         det_xmax = det_xmin + det_width - 1
+         det_ymax = det_ymin + det_height - 1
+
+         # enlarge the detection box; the top area is trimmed rather than
+         # enlarged, for alignment -- see the PIPNet paper for details
+         det_xmin -= int(det_width * (det_box_scale - 1) / 2)
+         det_ymin += int(det_height * (det_box_scale - 1) / 2)
+         det_xmax += int(det_width * (det_box_scale - 1) / 2)
+         det_ymax += int(det_height * (det_box_scale - 1) / 2)
+         det_xmin = max(det_xmin, 0)
+         det_ymin = max(det_ymin, 0)
+         det_xmax = min(det_xmax, image_width - 1)
+         det_ymax = min(det_ymax, image_height - 1)
+         det_width = det_xmax - det_xmin + 1
+         det_height = det_ymax - det_ymin + 1
+
+         # cv2.rectangle(image, (det_xmin, det_ymin), (det_xmax, det_ymax), (0, 0, 255), 2)
+
+         det_crop = image[det_ymin:det_ymax, det_xmin:det_xmax, :]
+         det_crop = cv2.resize(det_crop, (input_size, input_size))
+         inputs = Image.fromarray(det_crop[:, :, ::-1].astype("uint8"), "RGB")
+         inputs = preprocess(inputs).unsqueeze(0)
+         inputs = inputs.to(device)
+         (
+             lms_pred_x,
+             lms_pred_y,
+             lms_pred_nb_x,
+             lms_pred_nb_y,
+             outputs_cls,
+             max_cls,
+         ) = forward_pip(net, inputs, preprocess, input_size, net_stride, num_nb)
+         lms_pred = torch.cat((lms_pred_x, lms_pred_y), dim=1).flatten()
+         # average each landmark with its neighbor predictions via the reverse index
+         tmp_nb_x = lms_pred_nb_x[reverse_index1, reverse_index2].view(98, max_len)
+         tmp_nb_y = lms_pred_nb_y[reverse_index1, reverse_index2].view(98, max_len)
+         tmp_x = torch.mean(torch.cat((lms_pred_x, tmp_nb_x), dim=1), dim=1).view(-1, 1)
+         tmp_y = torch.mean(torch.cat((lms_pred_y, tmp_nb_y), dim=1), dim=1).view(-1, 1)
+         lms_pred_merge = torch.cat((tmp_x, tmp_y), dim=1).flatten()
+         lms_pred = lms_pred.cpu().numpy()
+         lms_pred_merge = lms_pred_merge.cpu().numpy()
+         lmk_ = []
+         # use j here so the outer detection index i is not shadowed
+         for j in range(98):
+             x_pred = lms_pred_merge[j * 2] * det_width
+             y_pred = lms_pred_merge[j * 2 + 1] * det_height
+             # cv2.circle(image, (int(x_pred) + det_xmin, int(y_pred) + det_ymin), 1, (0, 0, 255), 1)
+             lmk_.append([int(x_pred) + det_xmin, int(y_pred) + det_ymin])
+         lmks.append(np.array(lmk_))
+
+     # image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+     # cv2.imwrite("./1_out.jpg", image_bgr)
+
+     return lmks
+
+
+ if __name__ == "__main__":
+     net, detector = get_lmk_model()
+     # demo_image expects a decoded image array, so read the file first
+     demo_image(
+         cv2.imread("/apdcephfs/private_ahbanliang/codes/Real-ESRGAN-master/tmp_frames/yanikefu/frame00000046.png"),
+         net,
+         detector,
+     )
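
Note: a minimal usage sketch for the helpers above (the image path is hypothetical; the weights under weights/PIPNet/ and a CUDA device are assumed, since demo_image defaults to device="cuda:0"):

    import cv2
    from third_party.PIPNet.lib.tools import get_lmk_model, demo_image

    net, detector = get_lmk_model()           # PIPNet (ResNet-101) + FaceBoxesV2 detector
    frame = cv2.imread('example.jpg')         # hypothetical path; a decoded BGR array is expected
    faces = demo_image(frame, net, detector)  # one (98, 2) int array per detected face
    for lmk in faces:
        print(lmk.shape)                      # (98, 2), in original-image pixel coordinates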
third_party/PIPNet/lib/train.py ADDED
@@ -0,0 +1,196 @@
+ import cv2, os
+ import sys
+ sys.path.insert(0, '..')
+ import numpy as np
+ from PIL import Image
+ import logging
+ import copy
+ import importlib
+
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ import torch.utils.data
+ import torch.nn.functional as F
+ import torchvision.transforms as transforms
+ import torchvision.datasets as datasets
+ import torchvision.models as models
+
+ from networks import *
+ import data_utils
+ from functions import *
+ from mobilenetv3 import mobilenetv3_large
+
+ if not len(sys.argv) == 2:
+     print('Format:')
+     print('python lib/train.py config_file')
+     exit(0)
+ experiment_name = sys.argv[1].split('/')[-1][:-3]
+ data_name = sys.argv[1].split('/')[-2]
+ config_path = '.experiments.{}.{}'.format(data_name, experiment_name)
+
+ my_config = importlib.import_module(config_path, package='PIPNet')
+ Config = getattr(my_config, 'Config')
+ cfg = Config()
+ cfg.experiment_name = experiment_name
+ cfg.data_name = data_name
+
+ os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu_id)
+
+ if not os.path.exists(os.path.join('./snapshots', cfg.data_name)):
+     os.mkdir(os.path.join('./snapshots', cfg.data_name))
+ save_dir = os.path.join('./snapshots', cfg.data_name, cfg.experiment_name)
+ if not os.path.exists(save_dir):
+     os.mkdir(save_dir)
+
+ if not os.path.exists(os.path.join('./logs', cfg.data_name)):
+     os.mkdir(os.path.join('./logs', cfg.data_name))
+ log_dir = os.path.join('./logs', cfg.data_name, cfg.experiment_name)
+ if not os.path.exists(log_dir):
+     os.mkdir(log_dir)
+
+ logging.basicConfig(filename=os.path.join(log_dir, 'train.log'), level=logging.INFO)
+
+ print('###########################################')
+ print('experiment_name:', cfg.experiment_name)
+ print('data_name:', cfg.data_name)
+ print('det_head:', cfg.det_head)
+ print('net_stride:', cfg.net_stride)
+ print('batch_size:', cfg.batch_size)
+ print('init_lr:', cfg.init_lr)
+ print('num_epochs:', cfg.num_epochs)
+ print('decay_steps:', cfg.decay_steps)
+ print('input_size:', cfg.input_size)
+ print('backbone:', cfg.backbone)
+ print('pretrained:', cfg.pretrained)
+ print('criterion_cls:', cfg.criterion_cls)
+ print('criterion_reg:', cfg.criterion_reg)
+ print('cls_loss_weight:', cfg.cls_loss_weight)
+ print('reg_loss_weight:', cfg.reg_loss_weight)
+ print('num_lms:', cfg.num_lms)
+ print('save_interval:', cfg.save_interval)
+ print('num_nb:', cfg.num_nb)
+ print('use_gpu:', cfg.use_gpu)
+ print('gpu_id:', cfg.gpu_id)
+ print('###########################################')
+ logging.info('###########################################')
+ logging.info('experiment_name: {}'.format(cfg.experiment_name))
+ logging.info('data_name: {}'.format(cfg.data_name))
+ logging.info('det_head: {}'.format(cfg.det_head))
+ logging.info('net_stride: {}'.format(cfg.net_stride))
+ logging.info('batch_size: {}'.format(cfg.batch_size))
+ logging.info('init_lr: {}'.format(cfg.init_lr))
+ logging.info('num_epochs: {}'.format(cfg.num_epochs))
+ logging.info('decay_steps: {}'.format(cfg.decay_steps))
+ logging.info('input_size: {}'.format(cfg.input_size))
+ logging.info('backbone: {}'.format(cfg.backbone))
+ logging.info('pretrained: {}'.format(cfg.pretrained))
+ logging.info('criterion_cls: {}'.format(cfg.criterion_cls))
+ logging.info('criterion_reg: {}'.format(cfg.criterion_reg))
+ logging.info('cls_loss_weight: {}'.format(cfg.cls_loss_weight))
+ logging.info('reg_loss_weight: {}'.format(cfg.reg_loss_weight))
+ logging.info('num_lms: {}'.format(cfg.num_lms))
+ logging.info('save_interval: {}'.format(cfg.save_interval))
+ logging.info('num_nb: {}'.format(cfg.num_nb))
+ logging.info('use_gpu: {}'.format(cfg.use_gpu))
+ logging.info('gpu_id: {}'.format(cfg.gpu_id))
+ logging.info('###########################################')
+
+ if cfg.det_head == 'pip':
+     meanface_indices, _, _, _ = get_meanface(os.path.join('data', cfg.data_name, 'meanface.txt'), cfg.num_nb)
+
+
+ if cfg.det_head == 'pip':
+     if cfg.backbone == 'resnet18':
+         resnet18 = models.resnet18(pretrained=cfg.pretrained)
+         net = Pip_resnet18(resnet18, cfg.num_nb, num_lms=cfg.num_lms, input_size=cfg.input_size, net_stride=cfg.net_stride)
+     elif cfg.backbone == 'resnet50':
+         resnet50 = models.resnet50(pretrained=cfg.pretrained)
+         net = Pip_resnet50(resnet50, cfg.num_nb, num_lms=cfg.num_lms, input_size=cfg.input_size, net_stride=cfg.net_stride)
+     elif cfg.backbone == 'resnet101':
+         resnet101 = models.resnet101(pretrained=cfg.pretrained)
+         net = Pip_resnet101(resnet101, cfg.num_nb, num_lms=cfg.num_lms, input_size=cfg.input_size, net_stride=cfg.net_stride)
+     elif cfg.backbone == 'mobilenet_v2':
+         mbnet = models.mobilenet_v2(pretrained=cfg.pretrained)
+         net = Pip_mbnetv2(mbnet, cfg.num_nb, num_lms=cfg.num_lms, input_size=cfg.input_size, net_stride=cfg.net_stride)
+     elif cfg.backbone == 'mobilenet_v3':
+         mbnet = mobilenetv3_large()
+         if cfg.pretrained:
+             mbnet.load_state_dict(torch.load('lib/mobilenetv3-large-1cd25616.pth'))
+         net = Pip_mbnetv3(mbnet, cfg.num_nb, num_lms=cfg.num_lms, input_size=cfg.input_size, net_stride=cfg.net_stride)
+     else:
+         print('No such backbone!')
+         exit(0)
+ else:
+     print('No such head:', cfg.det_head)
+     exit(0)
+
+ if cfg.use_gpu:
+     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+ else:
+     device = torch.device("cpu")
+ net = net.to(device)
+
+ criterion_cls = None
+ if cfg.criterion_cls == 'l2':
+     criterion_cls = nn.MSELoss()
+ elif cfg.criterion_cls == 'l1':
+     criterion_cls = nn.L1Loss()
+ else:
+     print('No such cls criterion:', cfg.criterion_cls)
+
+ criterion_reg = None
+ if cfg.criterion_reg == 'l1':
+     criterion_reg = nn.L1Loss()
+ elif cfg.criterion_reg == 'l2':
+     criterion_reg = nn.MSELoss()
+ else:
+     print('No such reg criterion:', cfg.criterion_reg)
+
+ points_flip = None
+ if cfg.data_name == 'data_300W':
+     points_flip = [17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 28, 29, 30, 31, 36, 35, 34, 33, 32, 46, 45, 44, 43, 48, 47, 40, 39, 38, 37, 42, 41, 55, 54, 53, 52, 51, 50, 49, 60, 59, 58, 57, 56, 65, 64, 63, 62, 61, 68, 67, 66]
+     points_flip = (np.array(points_flip)-1).tolist()
+     assert len(points_flip) == 68
+ elif cfg.data_name == 'WFLW':
+     points_flip = [32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 46, 45, 44, 43, 42, 50, 49, 48, 47, 37, 36, 35, 34, 33, 41, 40, 39, 38, 51, 52, 53, 54, 59, 58, 57, 56, 55, 72, 71, 70, 69, 68, 75, 74, 73, 64, 63, 62, 61, 60, 67, 66, 65, 82, 81, 80, 79, 78, 77, 76, 87, 86, 85, 84, 83, 92, 91, 90, 89, 88, 95, 94, 93, 97, 96]
+     assert len(points_flip) == 98
+ elif cfg.data_name == 'COFW':
+     points_flip = [2, 1, 4, 3, 7, 8, 5, 6, 10, 9, 12, 11, 15, 16, 13, 14, 18, 17, 20, 19, 21, 22, 24, 23, 25, 26, 27, 28, 29]
+     points_flip = (np.array(points_flip)-1).tolist()
+     assert len(points_flip) == 29
+ elif cfg.data_name == 'AFLW':
+     points_flip = [6, 5, 4, 3, 2, 1, 12, 11, 10, 9, 8, 7, 15, 14, 13, 18, 17, 16, 19]
+     points_flip = (np.array(points_flip)-1).tolist()
+     assert len(points_flip) == 19
+ else:
+     print('No such data!')
+     exit(0)
+
+ normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                  std=[0.229, 0.224, 0.225])
+
+ if cfg.pretrained:
+     optimizer = optim.Adam(net.parameters(), lr=cfg.init_lr)
+ else:
+     optimizer = optim.Adam(net.parameters(), lr=cfg.init_lr, weight_decay=5e-4)
+ scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg.decay_steps, gamma=0.1)
+
+ labels = get_label(cfg.data_name, 'train.txt')
+
+ if cfg.det_head == 'pip':
+     train_data = data_utils.ImageFolder_pip(os.path.join('data', cfg.data_name, 'images_train'),
+                                             labels, cfg.input_size, cfg.num_lms,
+                                             cfg.net_stride, points_flip, meanface_indices,
+                                             transforms.Compose([
+                                                 transforms.RandomGrayscale(0.2),
+                                                 transforms.ToTensor(),
+                                                 normalize]))
+ else:
+     print('No such head:', cfg.det_head)
+     exit(0)
+
+ train_loader = torch.utils.data.DataLoader(train_data, batch_size=cfg.batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True)
+
+ train_model(cfg.det_head, net, train_loader, criterion_cls, criterion_reg, cfg.cls_loss_weight, cfg.reg_loss_weight, cfg.num_nb, optimizer, cfg.num_epochs, scheduler, save_dir, cfg.save_interval, device)
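
Note: train.py resolves its Config class from the path argument, so 'python lib/train.py experiments/WFLW/pip_resnet101.py' would import .experiments.WFLW.pip_resnet101 relative to the PIPNet package. A hypothetical config module illustrating the fields the script reads; the values mirror the Config hard-coded in tools.py above:

    # experiments/WFLW/pip_resnet101.py (hypothetical file name)
    class Config:
        def __init__(self):
            self.det_head = 'pip'            # only 'pip' is handled above
            self.net_stride = 32
            self.batch_size = 16
            self.init_lr = 0.0001
            self.num_epochs = 60
            self.decay_steps = [30, 50]      # MultiStepLR milestones
            self.input_size = 256
            self.backbone = 'resnet101'      # resnet18/50/101 or mobilenet_v2/v3
            self.pretrained = True
            self.criterion_cls = 'l2'
            self.criterion_reg = 'l1'
            self.cls_loss_weight = 10
            self.reg_loss_weight = 1
            self.num_lms = 98                # 98 landmarks for WFLW
            self.save_interval = self.num_epochs
            self.num_nb = 10                 # neighbor predictions per landmark
            self.use_gpu = True
            self.gpu_id = 0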
third_party/PIPNet/lib/train_gssl.py ADDED
@@ -0,0 +1,303 @@
+ import cv2, os
+ import sys
+ sys.path.insert(0, '..')
+ import numpy as np
+ from PIL import Image
+ import logging
+ import importlib
+
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ import torch.utils.data
+ import torch.nn.functional as F
+ import torchvision.transforms as transforms
+ import torchvision.datasets as datasets
+ import torchvision.models as models
+
+ from networks_gssl import *
+ import data_utils_gssl
+ from functions_gssl import *
+
+ if not len(sys.argv) == 2:
+     print('Format:')
+     print('python lib/train_gssl.py config_file')
+     exit(0)
+ experiment_name = sys.argv[1].split('/')[-1][:-3]
+ data_name = sys.argv[1].split('/')[-2]
+ config_path = '.experiments.{}.{}'.format(data_name, experiment_name)
+
+ my_config = importlib.import_module(config_path, package='PIPNet')
+ Config = getattr(my_config, 'Config')
+ cfg = Config()
+ cfg.experiment_name = experiment_name
+ cfg.data_name = data_name
+
+ os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu_id)
+
+ if not os.path.exists(os.path.join('./snapshots', cfg.data_name)):
+     os.mkdir(os.path.join('./snapshots', cfg.data_name))
+ save_dir = os.path.join('./snapshots', cfg.data_name, cfg.experiment_name)
+ if not os.path.exists(save_dir):
+     os.mkdir(save_dir)
+
+ if not os.path.exists(os.path.join('./logs', cfg.data_name)):
+     os.mkdir(os.path.join('./logs', cfg.data_name))
+ log_dir = os.path.join('./logs', cfg.data_name, cfg.experiment_name)
+ if not os.path.exists(log_dir):
+     os.mkdir(log_dir)
+
+ logging.basicConfig(filename=os.path.join(log_dir, 'train.log'), level=logging.INFO)
+
+ print('###########################################')
+ print('experiment_name:', cfg.experiment_name)
+ print('data_name:', cfg.data_name)
+ print('det_head:', cfg.det_head)
+ print('net_stride:', cfg.net_stride)
+ print('batch_size:', cfg.batch_size)
+ print('init_lr:', cfg.init_lr)
+ print('num_epochs:', cfg.num_epochs)
+ print('decay_steps:', cfg.decay_steps)
+ print('input_size:', cfg.input_size)
+ print('backbone:', cfg.backbone)
+ print('pretrained:', cfg.pretrained)
+ print('criterion_cls:', cfg.criterion_cls)
+ print('criterion_reg:', cfg.criterion_reg)
+ print('cls_loss_weight:', cfg.cls_loss_weight)
+ print('reg_loss_weight:', cfg.reg_loss_weight)
+ print('num_lms:', cfg.num_lms)
+ print('save_interval:', cfg.save_interval)
+ print('num_nb:', cfg.num_nb)
+ print('use_gpu:', cfg.use_gpu)
+ print('gpu_id:', cfg.gpu_id)
+ print('curriculum:', cfg.curriculum)
+ print('###########################################')
+ logging.info('###########################################')
+ logging.info('experiment_name: {}'.format(cfg.experiment_name))
+ logging.info('data_name: {}'.format(cfg.data_name))
+ logging.info('det_head: {}'.format(cfg.det_head))
+ logging.info('net_stride: {}'.format(cfg.net_stride))
+ logging.info('batch_size: {}'.format(cfg.batch_size))
+ logging.info('init_lr: {}'.format(cfg.init_lr))
+ logging.info('num_epochs: {}'.format(cfg.num_epochs))
+ logging.info('decay_steps: {}'.format(cfg.decay_steps))
+ logging.info('input_size: {}'.format(cfg.input_size))
+ logging.info('backbone: {}'.format(cfg.backbone))
+ logging.info('pretrained: {}'.format(cfg.pretrained))
+ logging.info('criterion_cls: {}'.format(cfg.criterion_cls))
+ logging.info('criterion_reg: {}'.format(cfg.criterion_reg))
+ logging.info('cls_loss_weight: {}'.format(cfg.cls_loss_weight))
+ logging.info('reg_loss_weight: {}'.format(cfg.reg_loss_weight))
+ logging.info('num_lms: {}'.format(cfg.num_lms))
+ logging.info('save_interval: {}'.format(cfg.save_interval))
+ logging.info('num_nb: {}'.format(cfg.num_nb))
+ logging.info('use_gpu: {}'.format(cfg.use_gpu))
+ logging.info('gpu_id: {}'.format(cfg.gpu_id))
+ logging.info('###########################################')
+
+ if cfg.curriculum:
+     # self-training with curriculum
+     task_type_list = ['cls3', 'cls2', 'std', 'std', 'std']
+ else:
+     # standard self-training
+     task_type_list = ['std']*3
+
+ meanface_indices, reverse_index1, reverse_index2, max_len = get_meanface(os.path.join('data', cfg.data_name, 'meanface.txt'), cfg.num_nb)
+
+ if cfg.det_head == 'pip':
+     if cfg.backbone == 'resnet18':
+         resnet18 = models.resnet18(pretrained=cfg.pretrained)
+         net = Pip_resnet18(resnet18, cfg.num_nb, num_lms=cfg.num_lms, input_size=cfg.input_size, net_stride=cfg.net_stride)
+     else:
+         print('No such backbone!')
+         exit(0)
+ else:
+     print('No such head:', cfg.det_head)
+     exit(0)
+
+ if cfg.use_gpu:
+     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+ else:
+     device = torch.device("cpu")
+ net = net.to(device)
+
+ criterion_cls = None
+ if cfg.criterion_cls == 'l2':
+     criterion_cls = nn.MSELoss(reduction='sum')
+ elif cfg.criterion_cls == 'l1':
+     criterion_cls = nn.L1Loss()
+ else:
+     print('No such cls criterion:', cfg.criterion_cls)
+
+ criterion_reg = None
+ if cfg.criterion_reg == 'l1':
+     criterion_reg = nn.L1Loss(reduction='sum')
+ elif cfg.criterion_reg == 'l2':
+     criterion_reg = nn.MSELoss()
+ else:
+     print('No such reg criterion:', cfg.criterion_reg)
+
+ points_flip = [17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 28, 29, 30, 31, 36, 35, 34, 33, 32, 46, 45, 44, 43, 48, 47, 40, 39, 38, 37, 42, 41, 55, 54, 53, 52, 51, 50, 49, 60, 59, 58, 57, 56, 65, 64, 63, 62, 61, 68, 67, 66]
+ points_flip = (np.array(points_flip)-1).tolist()
+ assert len(points_flip) == 68
+
+ normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                  std=[0.229, 0.224, 0.225])
+
+ optimizer = optim.Adam(net.parameters(), lr=cfg.init_lr)
+ scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg.decay_steps, gamma=0.1)
+
+ labels = get_label(cfg.data_name, 'train_300W.txt', 'std')
+
+ train_data = data_utils_gssl.ImageFolder_pip(os.path.join('data', cfg.data_name, 'images_train'),
+                                              labels, cfg.input_size, cfg.num_lms,
+                                              cfg.net_stride, points_flip, meanface_indices,
+                                              transforms.Compose([
+                                                  transforms.RandomGrayscale(0.2),
+                                                  transforms.ToTensor(),
+                                                  normalize]))
+
+ train_loader = torch.utils.data.DataLoader(train_data, batch_size=cfg.batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True)
+
+ train_model(cfg.det_head, net, train_loader, criterion_cls, criterion_reg, cfg.cls_loss_weight, cfg.reg_loss_weight, cfg.num_nb, optimizer, cfg.num_epochs, scheduler, save_dir, cfg.save_interval, device)
+
+ ###############
+ # test
+ norm_indices = [36, 45]
+
+ preprocess = transforms.Compose([transforms.Resize((cfg.input_size, cfg.input_size)), transforms.ToTensor(), normalize])
+ test_data_list = ['300W', 'COFW', 'WFLW']
+ for test_data in test_data_list:
+     labels = get_label(cfg.data_name, 'test_'+test_data+'.txt')
+     nmes = []
+     norm = None
+     for label in labels:
+         image_name = label[0]
+         lms_gt = label[1]
+         image_path = os.path.join('data', cfg.data_name, 'images_test_'+test_data, image_name)
+         image = cv2.imread(image_path)
+         image = cv2.resize(image, (cfg.input_size, cfg.input_size))
+         inputs = Image.fromarray(image[:,:,::-1].astype('uint8'), 'RGB')
+         inputs = preprocess(inputs).unsqueeze(0)
+         inputs = inputs.to(device)
+         lms_pred_x, lms_pred_y, lms_pred_nb_x, lms_pred_nb_y, outputs_cls, max_cls = forward_pip(net, inputs, preprocess, cfg.input_size, cfg.net_stride, cfg.num_nb)
+         # inter-ocular
+         norm = np.linalg.norm(lms_gt.reshape(-1, 2)[norm_indices[0]] - lms_gt.reshape(-1, 2)[norm_indices[1]])
+         #############################
+         # merge neighbor predictions
+         lms_pred = torch.cat((lms_pred_x, lms_pred_y), dim=1).flatten().cpu().numpy()
+         tmp_nb_x = lms_pred_nb_x[reverse_index1, reverse_index2].view(cfg.num_lms, max_len)
+         tmp_nb_y = lms_pred_nb_y[reverse_index1, reverse_index2].view(cfg.num_lms, max_len)
+         tmp_x = torch.mean(torch.cat((lms_pred_x, tmp_nb_x), dim=1), dim=1).view(-1,1)
+         tmp_y = torch.mean(torch.cat((lms_pred_y, tmp_nb_y), dim=1), dim=1).view(-1,1)
+         lms_pred_merge = torch.cat((tmp_x, tmp_y), dim=1).flatten().cpu().numpy()
+         #############################
+         nme = compute_nme(lms_pred_merge, lms_gt, norm)
+         nmes.append(nme)
+
+     print('{} nme: {}'.format(test_data, np.mean(nmes)))
+     logging.info('{} nme: {}'.format(test_data, np.mean(nmes)))
+
+ for ti, task_type in enumerate(task_type_list):
+     print('###################################################')
+     print('Iter:', ti, 'task_type:', task_type)
+     ###############
+     # estimate
+     if cfg.data_name == 'data_300W_COFW_WFLW':
+         est_data_list = ['COFW', 'WFLW']
+     elif cfg.data_name == 'data_300W_CELEBA':
+         est_data_list = ['CELEBA']
+     else:
+         print('No such data!')
+         exit(0)
+     est_preds = []
+     for est_data in est_data_list:
+         labels = get_label(cfg.data_name, 'train_'+est_data+'.txt')
+         for label in labels:
+             image_name = label[0]
+             #print(image_name)
+             image_path = os.path.join('data', cfg.data_name, 'images_train', image_name)
+             image = cv2.imread(image_path)
+             image = cv2.resize(image, (cfg.input_size, cfg.input_size))
+             inputs = Image.fromarray(image[:,:,::-1].astype('uint8'), 'RGB')
+             inputs = preprocess(inputs).unsqueeze(0)
+             inputs = inputs.to(device)
+             lms_pred_x, lms_pred_y, lms_pred_nb_x, lms_pred_nb_y, outputs_cls, max_cls = forward_pip(net, inputs, preprocess, cfg.input_size, cfg.net_stride, cfg.num_nb)
+             #############################
+             # merge neighbor predictions
+             lms_pred = torch.cat((lms_pred_x, lms_pred_y), dim=1).flatten().cpu().numpy()
+             tmp_nb_x = lms_pred_nb_x[reverse_index1, reverse_index2].view(cfg.num_lms, max_len)
+             tmp_nb_y = lms_pred_nb_y[reverse_index1, reverse_index2].view(cfg.num_lms, max_len)
+             tmp_x = torch.mean(torch.cat((lms_pred_x, tmp_nb_x), dim=1), dim=1).view(-1,1)
+             tmp_y = torch.mean(torch.cat((lms_pred_y, tmp_nb_y), dim=1), dim=1).view(-1,1)
+             lms_pred_merge = torch.cat((tmp_x, tmp_y), dim=1).flatten().cpu().numpy()
+             #############################
+             est_preds.append([image_name, task_type, lms_pred_merge])
+
+     ################
+     # GSSL
+     if cfg.det_head == 'pip':
+         if cfg.backbone == 'resnet18':
+             resnet18 = models.resnet18(pretrained=cfg.pretrained)
+             net = Pip_resnet18(resnet18, cfg.num_nb, num_lms=cfg.num_lms, input_size=cfg.input_size, net_stride=cfg.net_stride)
+         else:
+             print('No such backbone!')
+             exit(0)
+     else:
+         print('No such head:', cfg.det_head)
+         exit(0)
+
+     net = net.to(device)
+     optimizer = optim.Adam(net.parameters(), lr=cfg.init_lr)
+     scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg.decay_steps, gamma=0.1)
+     labels = get_label(cfg.data_name, 'train_300W.txt', 'std')
+     labels += est_preds
+
+     train_data = data_utils_gssl.ImageFolder_pip(os.path.join('data', cfg.data_name, 'images_train'),
+                                                  labels, cfg.input_size, cfg.num_lms,
+                                                  cfg.net_stride, points_flip, meanface_indices,
+                                                  transforms.Compose([
+                                                      transforms.RandomGrayscale(0.2),
+                                                      transforms.ToTensor(),
+                                                      normalize]))
+
+     train_loader = torch.utils.data.DataLoader(train_data, batch_size=cfg.batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True)
+
+     train_model(cfg.det_head, net, train_loader, criterion_cls, criterion_reg, cfg.cls_loss_weight, cfg.reg_loss_weight, cfg.num_nb, optimizer, cfg.num_epochs, scheduler, save_dir, cfg.save_interval, device)
+
+     ###############
+     # test
+     preprocess = transforms.Compose([transforms.Resize((cfg.input_size, cfg.input_size)), transforms.ToTensor(), normalize])
+     test_data_list = ['300W', 'COFW', 'WFLW']
+     for test_data in test_data_list:
+         labels = get_label(cfg.data_name, 'test_'+test_data+'.txt')
+         nmes = []
+         norm = None
+         for label in labels:
+             image_name = label[0]
+             lms_gt = label[1]
+             image_path = os.path.join('data', cfg.data_name, 'images_test_'+test_data, image_name)
+             image = cv2.imread(image_path)
+             image = cv2.resize(image, (cfg.input_size, cfg.input_size))
+             inputs = Image.fromarray(image[:,:,::-1].astype('uint8'), 'RGB')
+             inputs = preprocess(inputs).unsqueeze(0)
+             inputs = inputs.to(device)
+             lms_pred_x, lms_pred_y, lms_pred_nb_x, lms_pred_nb_y, outputs_cls, max_cls = forward_pip(net, inputs, preprocess, cfg.input_size, cfg.net_stride, cfg.num_nb)
+             # inter-ocular
+             norm = np.linalg.norm(lms_gt.reshape(-1, 2)[norm_indices[0]] - lms_gt.reshape(-1, 2)[norm_indices[1]])
+             #############################
+             # merge neighbor predictions
+             lms_pred = torch.cat((lms_pred_x, lms_pred_y), dim=1).flatten().cpu().numpy()
+             tmp_nb_x = lms_pred_nb_x[reverse_index1, reverse_index2].view(cfg.num_lms, max_len)
+             tmp_nb_y = lms_pred_nb_y[reverse_index1, reverse_index2].view(cfg.num_lms, max_len)
+             tmp_x = torch.mean(torch.cat((lms_pred_x, tmp_nb_x), dim=1), dim=1).view(-1,1)
+             tmp_y = torch.mean(torch.cat((lms_pred_y, tmp_nb_y), dim=1), dim=1).view(-1,1)
+             lms_pred_merge = torch.cat((tmp_x, tmp_y), dim=1).flatten().cpu().numpy()
+             #############################
+             nme = compute_nme(lms_pred_merge, lms_gt, norm)
+             nmes.append(nme)
+
+         print('{} nme: {}'.format(test_data, np.mean(nmes)))
+         logging.info('{} nme: {}'.format(test_data, np.mean(nmes)))
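
Note: the test loops above report NME normalized by the inter-ocular distance (points 36 and 45 in the 68-point scheme, per norm_indices). A sketch of what compute_nme from functions_gssl presumably does under that convention; this is an assumption, not the imported implementation:

    import numpy as np

    def nme_sketch(lms_pred, lms_gt, norm):
        # both inputs are flattened (x1, y1, ..., xN, yN) arrays, as in the loops above
        pred = np.asarray(lms_pred).reshape(-1, 2)
        gt = np.asarray(lms_gt).reshape(-1, 2)
        # mean point-to-point distance, divided by the inter-ocular distance
        return np.mean(np.linalg.norm(pred - gt, axis=1)) / norm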
third_party/PIPNet/requirements.txt ADDED
@@ -0,0 +1,3 @@
+ opencv-python
+ scipy
+ Cython
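
Note on the file that follows: reverse_index.py stores the two flat lookup tables (ri1, ri2) consumed by the neighbor-merging step in tools.py. A sketch of the gather they perform, using a random stand-in for the network output (the (98, 10) shape of the neighbor predictions is assumed from the view(98, max_len) call above):

    import torch
    from third_party.PIPNet.reverse_index import ri1, ri2

    nb_pred = torch.rand(98, 10)               # stand-in for lms_pred_nb_x: 10 neighbor slots per landmark
    gathered = nb_pred[ri1, ri2].view(98, 17)  # for each landmark, the 17 neighbor estimates pointing at it
    print(gathered.shape)                      # torch.Size([98, 17]); averaged with lms_pred_x in demo_image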
third_party/PIPNet/reverse_index.py ADDED
@@ -0,0 +1,3338 @@
1
+ ri1 = [
2
+ 1,
3
+ 2,
4
+ 3,
5
+ 4,
6
+ 5,
7
+ 33,
8
+ 1,
9
+ 2,
10
+ 3,
11
+ 4,
12
+ 5,
13
+ 33,
14
+ 1,
15
+ 2,
16
+ 3,
17
+ 4,
18
+ 5,
19
+ 0,
20
+ 2,
21
+ 3,
22
+ 4,
23
+ 5,
24
+ 6,
25
+ 33,
26
+ 0,
27
+ 2,
28
+ 3,
29
+ 4,
30
+ 5,
31
+ 6,
32
+ 33,
33
+ 0,
34
+ 2,
35
+ 3,
36
+ 0,
37
+ 1,
38
+ 3,
39
+ 4,
40
+ 5,
41
+ 6,
42
+ 0,
43
+ 1,
44
+ 3,
45
+ 4,
46
+ 5,
47
+ 6,
48
+ 0,
49
+ 1,
50
+ 3,
51
+ 4,
52
+ 5,
53
+ 0,
54
+ 1,
55
+ 2,
56
+ 4,
57
+ 5,
58
+ 6,
59
+ 7,
60
+ 0,
61
+ 1,
62
+ 2,
63
+ 4,
64
+ 5,
65
+ 6,
66
+ 7,
67
+ 0,
68
+ 1,
69
+ 2,
70
+ 0,
71
+ 1,
72
+ 2,
73
+ 3,
74
+ 5,
75
+ 6,
76
+ 7,
77
+ 8,
78
+ 0,
79
+ 1,
80
+ 2,
81
+ 3,
82
+ 5,
83
+ 6,
84
+ 7,
85
+ 8,
86
+ 0,
87
+ 1,
88
+ 2,
89
+ 3,
90
+ 4,
91
+ 6,
92
+ 7,
93
+ 8,
94
+ 9,
95
+ 1,
96
+ 2,
97
+ 3,
98
+ 4,
99
+ 6,
100
+ 7,
101
+ 8,
102
+ 9,
103
+ 1,
104
+ 2,
105
+ 3,
106
+ 4,
107
+ 5,
108
+ 7,
109
+ 8,
110
+ 9,
111
+ 10,
112
+ 2,
113
+ 3,
114
+ 4,
115
+ 5,
116
+ 7,
117
+ 8,
118
+ 9,
119
+ 10,
120
+ 2,
121
+ 3,
122
+ 4,
123
+ 5,
124
+ 6,
125
+ 8,
126
+ 9,
127
+ 10,
128
+ 3,
129
+ 4,
130
+ 5,
131
+ 6,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3,
136
+ 4,
137
+ 5,
138
+ 4,
139
+ 5,
140
+ 6,
141
+ 7,
142
+ 9,
143
+ 10,
144
+ 11,
145
+ 4,
146
+ 5,
147
+ 6,
148
+ 7,
149
+ 9,
150
+ 10,
151
+ 11,
152
+ 4,
153
+ 5,
154
+ 6,
155
+ 4,
156
+ 5,
157
+ 6,
158
+ 7,
159
+ 8,
160
+ 10,
161
+ 11,
162
+ 12,
163
+ 4,
164
+ 5,
165
+ 6,
166
+ 7,
167
+ 8,
168
+ 10,
169
+ 11,
170
+ 12,
171
+ 4,
172
+ 5,
173
+ 6,
174
+ 7,
175
+ 8,
176
+ 9,
177
+ 11,
178
+ 12,
179
+ 13,
180
+ 76,
181
+ 5,
182
+ 6,
183
+ 7,
184
+ 8,
185
+ 9,
186
+ 11,
187
+ 12,
188
+ 13,
189
+ 7,
190
+ 8,
191
+ 9,
192
+ 10,
193
+ 12,
194
+ 13,
195
+ 14,
196
+ 76,
197
+ 88,
198
+ 7,
199
+ 8,
200
+ 9,
201
+ 10,
202
+ 12,
203
+ 13,
204
+ 14,
205
+ 76,
206
+ 8,
207
+ 9,
208
+ 10,
209
+ 11,
210
+ 13,
211
+ 14,
212
+ 15,
213
+ 8,
214
+ 9,
215
+ 10,
216
+ 11,
217
+ 13,
218
+ 14,
219
+ 15,
220
+ 8,
221
+ 9,
222
+ 10,
223
+ 10,
224
+ 11,
225
+ 12,
226
+ 14,
227
+ 15,
228
+ 16,
229
+ 10,
230
+ 11,
231
+ 12,
232
+ 14,
233
+ 15,
234
+ 16,
235
+ 10,
236
+ 11,
237
+ 12,
238
+ 14,
239
+ 15,
240
+ 11,
241
+ 12,
242
+ 13,
243
+ 15,
244
+ 16,
245
+ 17,
246
+ 11,
247
+ 12,
248
+ 13,
249
+ 15,
250
+ 16,
251
+ 17,
252
+ 11,
253
+ 12,
254
+ 13,
255
+ 15,
256
+ 16,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 16,
261
+ 17,
262
+ 18,
263
+ 12,
264
+ 13,
265
+ 14,
266
+ 16,
267
+ 17,
268
+ 18,
269
+ 12,
270
+ 13,
271
+ 14,
272
+ 16,
273
+ 17,
274
+ 13,
275
+ 14,
276
+ 15,
277
+ 17,
278
+ 18,
279
+ 19,
280
+ 13,
281
+ 14,
282
+ 15,
283
+ 17,
284
+ 18,
285
+ 19,
286
+ 13,
287
+ 14,
288
+ 15,
289
+ 17,
290
+ 18,
291
+ 14,
292
+ 15,
293
+ 16,
294
+ 18,
295
+ 19,
296
+ 20,
297
+ 14,
298
+ 15,
299
+ 16,
300
+ 18,
301
+ 19,
302
+ 20,
303
+ 14,
304
+ 15,
305
+ 16,
306
+ 18,
307
+ 19,
308
+ 15,
309
+ 16,
310
+ 17,
311
+ 19,
312
+ 20,
313
+ 21,
314
+ 15,
315
+ 16,
316
+ 17,
317
+ 19,
318
+ 20,
319
+ 21,
320
+ 15,
321
+ 16,
322
+ 17,
323
+ 19,
324
+ 20,
325
+ 16,
326
+ 17,
327
+ 18,
328
+ 20,
329
+ 21,
330
+ 22,
331
+ 16,
332
+ 17,
333
+ 18,
334
+ 20,
335
+ 21,
336
+ 22,
337
+ 16,
338
+ 17,
339
+ 18,
340
+ 20,
341
+ 21,
342
+ 17,
343
+ 18,
344
+ 19,
345
+ 21,
346
+ 22,
347
+ 23,
348
+ 24,
349
+ 17,
350
+ 18,
351
+ 19,
352
+ 21,
353
+ 22,
354
+ 23,
355
+ 24,
356
+ 17,
357
+ 18,
358
+ 19,
359
+ 18,
360
+ 19,
361
+ 20,
362
+ 22,
363
+ 23,
364
+ 24,
365
+ 25,
366
+ 82,
367
+ 18,
368
+ 19,
369
+ 20,
370
+ 22,
371
+ 23,
372
+ 24,
373
+ 25,
374
+ 82,
375
+ 18,
376
+ 19,
377
+ 20,
378
+ 21,
379
+ 23,
380
+ 24,
381
+ 25,
382
+ 26,
383
+ 27,
384
+ 19,
385
+ 20,
386
+ 21,
387
+ 23,
388
+ 24,
389
+ 25,
390
+ 26,
391
+ 27,
392
+ 19,
393
+ 20,
394
+ 21,
395
+ 22,
396
+ 24,
397
+ 25,
398
+ 26,
399
+ 27,
400
+ 28,
401
+ 20,
402
+ 21,
403
+ 22,
404
+ 24,
405
+ 25,
406
+ 26,
407
+ 27,
408
+ 28,
409
+ 20,
410
+ 21,
411
+ 22,
412
+ 23,
413
+ 25,
414
+ 26,
415
+ 27,
416
+ 28,
417
+ 21,
418
+ 22,
419
+ 23,
420
+ 25,
421
+ 26,
422
+ 27,
423
+ 28,
424
+ 21,
425
+ 22,
426
+ 23,
427
+ 21,
428
+ 22,
429
+ 23,
430
+ 24,
431
+ 26,
432
+ 27,
433
+ 28,
434
+ 29,
435
+ 21,
436
+ 22,
437
+ 23,
438
+ 24,
439
+ 26,
440
+ 27,
441
+ 28,
442
+ 29,
443
+ 21,
444
+ 22,
445
+ 23,
446
+ 24,
447
+ 25,
448
+ 27,
449
+ 28,
450
+ 29,
451
+ 30,
452
+ 22,
453
+ 23,
454
+ 24,
455
+ 25,
456
+ 27,
457
+ 28,
458
+ 29,
459
+ 30,
460
+ 22,
461
+ 23,
462
+ 24,
463
+ 25,
464
+ 26,
465
+ 28,
466
+ 29,
467
+ 30,
468
+ 31,
469
+ 23,
470
+ 24,
471
+ 25,
472
+ 26,
473
+ 28,
474
+ 29,
475
+ 30,
476
+ 31,
477
+ 23,
478
+ 24,
479
+ 25,
480
+ 26,
481
+ 27,
482
+ 29,
483
+ 30,
484
+ 31,
485
+ 32,
486
+ 24,
487
+ 25,
488
+ 26,
489
+ 27,
490
+ 29,
491
+ 30,
492
+ 31,
493
+ 32,
494
+ 24,
495
+ 25,
496
+ 26,
497
+ 27,
498
+ 28,
499
+ 30,
500
+ 31,
501
+ 32,
502
+ 25,
503
+ 26,
504
+ 27,
505
+ 28,
506
+ 30,
507
+ 31,
508
+ 32,
509
+ 25,
510
+ 26,
511
+ 27,
512
+ 26,
513
+ 27,
514
+ 28,
515
+ 29,
516
+ 31,
517
+ 32,
518
+ 26,
519
+ 27,
520
+ 28,
521
+ 29,
522
+ 31,
523
+ 32,
524
+ 26,
525
+ 27,
526
+ 28,
527
+ 29,
528
+ 31,
529
+ 26,
530
+ 27,
531
+ 28,
532
+ 29,
533
+ 30,
534
+ 32,
535
+ 46,
536
+ 26,
537
+ 27,
538
+ 28,
539
+ 29,
540
+ 30,
541
+ 32,
542
+ 46,
543
+ 26,
544
+ 27,
545
+ 28,
546
+ 27,
547
+ 28,
548
+ 29,
549
+ 30,
550
+ 31,
551
+ 46,
552
+ 27,
553
+ 28,
554
+ 29,
555
+ 30,
556
+ 31,
557
+ 46,
558
+ 27,
559
+ 28,
560
+ 29,
561
+ 30,
562
+ 31,
563
+ 0,
564
+ 1,
565
+ 2,
566
+ 3,
567
+ 34,
568
+ 41,
569
+ 60,
570
+ 0,
571
+ 1,
572
+ 2,
573
+ 3,
574
+ 34,
575
+ 41,
576
+ 60,
577
+ 0,
578
+ 1,
579
+ 2,
580
+ 0,
581
+ 33,
582
+ 35,
583
+ 40,
584
+ 41,
585
+ 60,
586
+ 0,
587
+ 33,
588
+ 35,
589
+ 40,
590
+ 41,
591
+ 60,
592
+ 0,
593
+ 33,
594
+ 35,
595
+ 40,
596
+ 41,
597
+ 33,
598
+ 34,
599
+ 36,
600
+ 37,
601
+ 39,
602
+ 40,
603
+ 41,
604
+ 60,
605
+ 61,
606
+ 62,
607
+ 33,
608
+ 34,
609
+ 36,
610
+ 37,
611
+ 39,
612
+ 40,
613
+ 41,
614
+ 34,
615
+ 35,
616
+ 37,
617
+ 38,
618
+ 39,
619
+ 40,
620
+ 63,
621
+ 64,
622
+ 34,
623
+ 35,
624
+ 37,
625
+ 38,
626
+ 39,
627
+ 40,
628
+ 63,
629
+ 64,
630
+ 34,
631
+ 36,
632
+ 38,
633
+ 39,
634
+ 51,
635
+ 64,
636
+ 36,
637
+ 38,
638
+ 39,
639
+ 51,
640
+ 64,
641
+ 36,
642
+ 38,
643
+ 39,
644
+ 51,
645
+ 64,
646
+ 36,
647
+ 38,
648
+ 36,
649
+ 37,
650
+ 39,
651
+ 51,
652
+ 52,
653
+ 63,
654
+ 64,
655
+ 65,
656
+ 36,
657
+ 37,
658
+ 39,
659
+ 51,
660
+ 52,
661
+ 63,
662
+ 64,
663
+ 65,
664
+ 36,
665
+ 35,
666
+ 36,
667
+ 37,
668
+ 38,
669
+ 40,
670
+ 62,
671
+ 63,
672
+ 64,
673
+ 65,
674
+ 66,
675
+ 67,
676
+ 96,
677
+ 35,
678
+ 36,
679
+ 37,
680
+ 38,
681
+ 40,
682
+ 33,
683
+ 34,
684
+ 35,
685
+ 36,
686
+ 37,
687
+ 38,
688
+ 39,
689
+ 41,
690
+ 60,
691
+ 61,
692
+ 62,
693
+ 63,
694
+ 65,
695
+ 66,
696
+ 67,
697
+ 96,
698
+ 33,
699
+ 0,
700
+ 1,
701
+ 2,
702
+ 33,
703
+ 34,
704
+ 35,
705
+ 40,
706
+ 60,
707
+ 61,
708
+ 67,
709
+ 0,
710
+ 1,
711
+ 2,
712
+ 33,
713
+ 34,
714
+ 35,
715
+ 40,
716
+ 43,
717
+ 49,
718
+ 50,
719
+ 51,
720
+ 68,
721
+ 43,
722
+ 49,
723
+ 50,
724
+ 51,
725
+ 68,
726
+ 43,
727
+ 49,
728
+ 50,
729
+ 51,
730
+ 68,
731
+ 43,
732
+ 49,
733
+ 42,
734
+ 44,
735
+ 45,
736
+ 48,
737
+ 49,
738
+ 50,
739
+ 68,
740
+ 69,
741
+ 42,
742
+ 44,
743
+ 45,
744
+ 48,
745
+ 49,
746
+ 50,
747
+ 68,
748
+ 69,
749
+ 42,
750
+ 42,
751
+ 43,
752
+ 45,
753
+ 46,
754
+ 47,
755
+ 48,
756
+ 49,
757
+ 70,
758
+ 42,
759
+ 43,
760
+ 45,
761
+ 46,
762
+ 47,
763
+ 48,
764
+ 49,
765
+ 70,
766
+ 42,
767
+ 32,
768
+ 44,
769
+ 46,
770
+ 47,
771
+ 48,
772
+ 71,
773
+ 72,
774
+ 73,
775
+ 32,
776
+ 44,
777
+ 46,
778
+ 47,
779
+ 48,
780
+ 71,
781
+ 72,
782
+ 73,
783
+ 32,
784
+ 29,
785
+ 30,
786
+ 31,
787
+ 32,
788
+ 45,
789
+ 47,
790
+ 72,
791
+ 29,
792
+ 30,
793
+ 31,
794
+ 32,
795
+ 45,
796
+ 47,
797
+ 72,
798
+ 29,
799
+ 30,
800
+ 31,
801
+ 30,
802
+ 31,
803
+ 32,
804
+ 44,
805
+ 45,
806
+ 46,
807
+ 48,
808
+ 71,
809
+ 72,
810
+ 73,
811
+ 30,
812
+ 31,
813
+ 32,
814
+ 44,
815
+ 45,
816
+ 46,
817
+ 48,
818
+ 42,
819
+ 43,
820
+ 44,
821
+ 45,
822
+ 46,
823
+ 47,
824
+ 49,
825
+ 50,
826
+ 69,
827
+ 70,
828
+ 71,
829
+ 72,
830
+ 73,
831
+ 74,
832
+ 75,
833
+ 97,
834
+ 42,
835
+ 42,
836
+ 43,
837
+ 44,
838
+ 48,
839
+ 50,
840
+ 68,
841
+ 69,
842
+ 70,
843
+ 74,
844
+ 75,
845
+ 97,
846
+ 42,
847
+ 43,
848
+ 44,
849
+ 48,
850
+ 50,
851
+ 68,
852
+ 42,
853
+ 43,
854
+ 49,
855
+ 51,
856
+ 52,
857
+ 68,
858
+ 69,
859
+ 75,
860
+ 42,
861
+ 43,
862
+ 49,
863
+ 51,
864
+ 52,
865
+ 68,
866
+ 69,
867
+ 75,
868
+ 42,
869
+ 37,
870
+ 38,
871
+ 42,
872
+ 50,
873
+ 52,
874
+ 53,
875
+ 64,
876
+ 68,
877
+ 37,
878
+ 38,
879
+ 42,
880
+ 50,
881
+ 52,
882
+ 53,
883
+ 64,
884
+ 68,
885
+ 37,
886
+ 51,
887
+ 53,
888
+ 54,
889
+ 51,
890
+ 53,
891
+ 54,
892
+ 51,
893
+ 53,
894
+ 54,
895
+ 51,
896
+ 53,
897
+ 54,
898
+ 51,
899
+ 53,
900
+ 54,
901
+ 51,
902
+ 53,
903
+ 51,
904
+ 52,
905
+ 54,
906
+ 55,
907
+ 56,
908
+ 57,
909
+ 59,
910
+ 51,
911
+ 52,
912
+ 54,
913
+ 55,
914
+ 56,
915
+ 57,
916
+ 59,
917
+ 51,
918
+ 52,
919
+ 54,
920
+ 52,
921
+ 53,
922
+ 55,
923
+ 56,
924
+ 57,
925
+ 58,
926
+ 59,
927
+ 52,
928
+ 53,
929
+ 55,
930
+ 56,
931
+ 57,
932
+ 58,
933
+ 59,
934
+ 52,
935
+ 53,
936
+ 55,
937
+ 53,
938
+ 54,
939
+ 56,
940
+ 57,
941
+ 76,
942
+ 77,
943
+ 78,
944
+ 88,
945
+ 53,
946
+ 54,
947
+ 56,
948
+ 57,
949
+ 76,
950
+ 77,
951
+ 78,
952
+ 88,
953
+ 53,
954
+ 53,
955
+ 54,
956
+ 55,
957
+ 57,
958
+ 58,
959
+ 77,
960
+ 78,
961
+ 79,
962
+ 88,
963
+ 53,
964
+ 54,
965
+ 55,
966
+ 57,
967
+ 58,
968
+ 77,
969
+ 78,
970
+ 79,
971
+ 53,
972
+ 54,
973
+ 55,
974
+ 56,
975
+ 58,
976
+ 59,
977
+ 78,
978
+ 79,
979
+ 80,
980
+ 90,
981
+ 53,
982
+ 54,
983
+ 55,
984
+ 56,
985
+ 58,
986
+ 59,
987
+ 78,
988
+ 53,
989
+ 54,
990
+ 56,
991
+ 57,
992
+ 59,
993
+ 79,
994
+ 80,
995
+ 81,
996
+ 82,
997
+ 92,
998
+ 53,
999
+ 54,
1000
+ 56,
1001
+ 57,
1002
+ 59,
1003
+ 79,
1004
+ 80,
1005
+ 53,
1006
+ 54,
1007
+ 57,
1008
+ 58,
1009
+ 80,
1010
+ 81,
1011
+ 82,
1012
+ 92,
1013
+ 53,
1014
+ 54,
1015
+ 57,
1016
+ 58,
1017
+ 80,
1018
+ 81,
1019
+ 82,
1020
+ 92,
1021
+ 53,
1022
+ 0,
1023
+ 1,
1024
+ 2,
1025
+ 3,
1026
+ 4,
1027
+ 33,
1028
+ 34,
1029
+ 41,
1030
+ 61,
1031
+ 62,
1032
+ 66,
1033
+ 67,
1034
+ 96,
1035
+ 0,
1036
+ 1,
1037
+ 2,
1038
+ 3,
1039
+ 0,
1040
+ 1,
1041
+ 33,
1042
+ 34,
1043
+ 35,
1044
+ 40,
1045
+ 41,
1046
+ 60,
1047
+ 62,
1048
+ 63,
1049
+ 65,
1050
+ 66,
1051
+ 67,
1052
+ 96,
1053
+ 0,
1054
+ 1,
1055
+ 33,
1056
+ 33,
1057
+ 34,
1058
+ 35,
1059
+ 36,
1060
+ 37,
1061
+ 38,
1062
+ 39,
1063
+ 40,
1064
+ 41,
1065
+ 60,
1066
+ 61,
1067
+ 63,
1068
+ 64,
1069
+ 65,
1070
+ 66,
1071
+ 67,
1072
+ 96,
1073
+ 35,
1074
+ 36,
1075
+ 37,
1076
+ 38,
1077
+ 39,
1078
+ 40,
1079
+ 51,
1080
+ 52,
1081
+ 61,
1082
+ 62,
1083
+ 64,
1084
+ 65,
1085
+ 66,
1086
+ 67,
1087
+ 96,
1088
+ 35,
1089
+ 36,
1090
+ 36,
1091
+ 37,
1092
+ 38,
1093
+ 39,
1094
+ 51,
1095
+ 52,
1096
+ 53,
1097
+ 63,
1098
+ 65,
1099
+ 66,
1100
+ 96,
1101
+ 36,
1102
+ 37,
1103
+ 38,
1104
+ 39,
1105
+ 51,
1106
+ 52,
1107
+ 36,
1108
+ 37,
1109
+ 38,
1110
+ 39,
1111
+ 52,
1112
+ 61,
1113
+ 62,
1114
+ 63,
1115
+ 64,
1116
+ 66,
1117
+ 67,
1118
+ 96,
1119
+ 36,
1120
+ 37,
1121
+ 38,
1122
+ 39,
1123
+ 52,
1124
+ 41,
1125
+ 60,
1126
+ 61,
1127
+ 62,
1128
+ 63,
1129
+ 64,
1130
+ 65,
1131
+ 67,
1132
+ 96,
1133
+ 41,
1134
+ 60,
1135
+ 61,
1136
+ 62,
1137
+ 63,
1138
+ 64,
1139
+ 65,
1140
+ 67,
1141
+ 0,
1142
+ 1,
1143
+ 2,
1144
+ 3,
1145
+ 33,
1146
+ 34,
1147
+ 35,
1148
+ 40,
1149
+ 41,
1150
+ 60,
1151
+ 61,
1152
+ 62,
1153
+ 65,
1154
+ 66,
1155
+ 96,
1156
+ 0,
1157
+ 1,
1158
+ 42,
1159
+ 43,
1160
+ 49,
1161
+ 50,
1162
+ 51,
1163
+ 52,
1164
+ 53,
1165
+ 69,
1166
+ 74,
1167
+ 75,
1168
+ 97,
1169
+ 42,
1170
+ 43,
1171
+ 49,
1172
+ 50,
1173
+ 51,
1174
+ 52,
1175
+ 42,
1176
+ 43,
1177
+ 44,
1178
+ 48,
1179
+ 49,
1180
+ 50,
1181
+ 51,
1182
+ 68,
1183
+ 70,
1184
+ 71,
1185
+ 73,
1186
+ 74,
1187
+ 75,
1188
+ 97,
1189
+ 42,
1190
+ 43,
1191
+ 44,
1192
+ 42,
1193
+ 43,
1194
+ 44,
1195
+ 45,
1196
+ 46,
1197
+ 47,
1198
+ 48,
1199
+ 49,
1200
+ 50,
1201
+ 68,
1202
+ 69,
1203
+ 71,
1204
+ 72,
1205
+ 73,
1206
+ 74,
1207
+ 75,
1208
+ 97,
1209
+ 31,
1210
+ 32,
1211
+ 44,
1212
+ 45,
1213
+ 46,
1214
+ 47,
1215
+ 48,
1216
+ 69,
1217
+ 70,
1218
+ 72,
1219
+ 73,
1220
+ 74,
1221
+ 75,
1222
+ 97,
1223
+ 31,
1224
+ 32,
1225
+ 44,
1226
+ 28,
1227
+ 29,
1228
+ 30,
1229
+ 31,
1230
+ 32,
1231
+ 45,
1232
+ 46,
1233
+ 47,
1234
+ 70,
1235
+ 71,
1236
+ 73,
1237
+ 74,
1238
+ 97,
1239
+ 28,
1240
+ 29,
1241
+ 30,
1242
+ 31,
1243
+ 29,
1244
+ 30,
1245
+ 31,
1246
+ 32,
1247
+ 44,
1248
+ 45,
1249
+ 46,
1250
+ 47,
1251
+ 48,
1252
+ 70,
1253
+ 71,
1254
+ 72,
1255
+ 74,
1256
+ 75,
1257
+ 97,
1258
+ 29,
1259
+ 30,
1260
+ 47,
1261
+ 68,
1262
+ 69,
1263
+ 70,
1264
+ 71,
1265
+ 72,
1266
+ 73,
1267
+ 75,
1268
+ 97,
1269
+ 47,
1270
+ 68,
1271
+ 69,
1272
+ 70,
1273
+ 71,
1274
+ 72,
1275
+ 73,
1276
+ 75,
1277
+ 42,
1278
+ 43,
1279
+ 49,
1280
+ 50,
1281
+ 52,
1282
+ 68,
1283
+ 69,
1284
+ 70,
1285
+ 71,
1286
+ 72,
1287
+ 73,
1288
+ 74,
1289
+ 97,
1290
+ 42,
1291
+ 43,
1292
+ 49,
1293
+ 50,
1294
+ 6,
1295
+ 7,
1296
+ 8,
1297
+ 9,
1298
+ 10,
1299
+ 11,
1300
+ 12,
1301
+ 55,
1302
+ 77,
1303
+ 87,
1304
+ 88,
1305
+ 89,
1306
+ 95,
1307
+ 6,
1308
+ 7,
1309
+ 8,
1310
+ 9,
1311
+ 55,
1312
+ 56,
1313
+ 76,
1314
+ 78,
1315
+ 86,
1316
+ 87,
1317
+ 88,
1318
+ 89,
1319
+ 95,
1320
+ 55,
1321
+ 56,
1322
+ 76,
1323
+ 78,
1324
+ 86,
1325
+ 87,
1326
+ 88,
1327
+ 89,
1328
+ 54,
1329
+ 55,
1330
+ 56,
1331
+ 57,
1332
+ 58,
1333
+ 76,
1334
+ 77,
1335
+ 79,
1336
+ 80,
1337
+ 85,
1338
+ 86,
1339
+ 87,
1340
+ 88,
1341
+ 89,
1342
+ 90,
1343
+ 94,
1344
+ 95,
1345
+ 54,
1346
+ 55,
1347
+ 56,
1348
+ 57,
1349
+ 58,
1350
+ 59,
1351
+ 77,
1352
+ 78,
1353
+ 80,
1354
+ 81,
1355
+ 84,
1356
+ 85,
1357
+ 86,
1358
+ 89,
1359
+ 90,
1360
+ 91,
1361
+ 94,
1362
+ 54,
1363
+ 57,
1364
+ 58,
1365
+ 59,
1366
+ 78,
1367
+ 79,
1368
+ 81,
1369
+ 82,
1370
+ 83,
1371
+ 84,
1372
+ 85,
1373
+ 90,
1374
+ 91,
1375
+ 92,
1376
+ 93,
1377
+ 94,
1378
+ 54,
1379
+ 58,
1380
+ 59,
1381
+ 80,
1382
+ 82,
1383
+ 83,
1384
+ 84,
1385
+ 91,
1386
+ 92,
1387
+ 93,
1388
+ 58,
1389
+ 59,
1390
+ 80,
1391
+ 82,
1392
+ 83,
1393
+ 84,
1394
+ 91,
1395
+ 92,
1396
+ 20,
1397
+ 21,
1398
+ 22,
1399
+ 23,
1400
+ 24,
1401
+ 25,
1402
+ 26,
1403
+ 59,
1404
+ 81,
1405
+ 83,
1406
+ 91,
1407
+ 92,
1408
+ 93,
1409
+ 20,
1410
+ 21,
1411
+ 22,
1412
+ 23,
1413
+ 17,
1414
+ 18,
1415
+ 19,
1416
+ 20,
1417
+ 21,
1418
+ 22,
1419
+ 23,
1420
+ 81,
1421
+ 82,
1422
+ 84,
1423
+ 91,
1424
+ 92,
1425
+ 93,
1426
+ 17,
1427
+ 18,
1428
+ 19,
1429
+ 20,
1430
+ 16,
1431
+ 17,
1432
+ 18,
1433
+ 19,
1434
+ 20,
1435
+ 81,
1436
+ 82,
1437
+ 83,
1438
+ 85,
1439
+ 91,
1440
+ 92,
1441
+ 93,
1442
+ 94,
1443
+ 16,
1444
+ 17,
1445
+ 18,
1446
+ 19,
1447
+ 14,
1448
+ 15,
1449
+ 16,
1450
+ 17,
1451
+ 18,
1452
+ 83,
1453
+ 84,
1454
+ 86,
1455
+ 87,
1456
+ 90,
1457
+ 93,
1458
+ 94,
1459
+ 95,
1460
+ 14,
1461
+ 15,
1462
+ 16,
1463
+ 17,
1464
+ 11,
1465
+ 12,
1466
+ 13,
1467
+ 14,
1468
+ 15,
1469
+ 16,
1470
+ 76,
1471
+ 77,
1472
+ 85,
1473
+ 87,
1474
+ 88,
1475
+ 89,
1476
+ 94,
1477
+ 95,
1478
+ 11,
1479
+ 12,
1480
+ 13,
1481
+ 9,
1482
+ 10,
1483
+ 11,
1484
+ 12,
1485
+ 13,
1486
+ 14,
1487
+ 76,
1488
+ 77,
1489
+ 86,
1490
+ 88,
1491
+ 89,
1492
+ 95,
1493
+ 9,
1494
+ 10,
1495
+ 11,
1496
+ 12,
1497
+ 13,
1498
+ 7,
1499
+ 8,
1500
+ 9,
1501
+ 10,
1502
+ 11,
1503
+ 12,
1504
+ 13,
1505
+ 55,
1506
+ 76,
1507
+ 77,
1508
+ 86,
1509
+ 87,
1510
+ 89,
1511
+ 95,
1512
+ 7,
1513
+ 8,
1514
+ 9,
1515
+ 55,
1516
+ 56,
1517
+ 76,
1518
+ 77,
1519
+ 78,
1520
+ 79,
1521
+ 86,
1522
+ 87,
1523
+ 88,
1524
+ 90,
1525
+ 95,
1526
+ 55,
1527
+ 56,
1528
+ 76,
1529
+ 77,
1530
+ 78,
1531
+ 79,
1532
+ 56,
1533
+ 57,
1534
+ 58,
1535
+ 78,
1536
+ 79,
1537
+ 80,
1538
+ 83,
1539
+ 84,
1540
+ 85,
1541
+ 86,
1542
+ 87,
1543
+ 89,
1544
+ 91,
1545
+ 92,
1546
+ 93,
1547
+ 94,
1548
+ 95,
1549
+ 58,
1550
+ 59,
1551
+ 79,
1552
+ 80,
1553
+ 81,
1554
+ 82,
1555
+ 83,
1556
+ 84,
1557
+ 85,
1558
+ 90,
1559
+ 92,
1560
+ 93,
1561
+ 94,
1562
+ 58,
1563
+ 59,
1564
+ 79,
1565
+ 80,
1566
+ 19,
1567
+ 20,
1568
+ 21,
1569
+ 22,
1570
+ 23,
1571
+ 24,
1572
+ 25,
1573
+ 59,
1574
+ 81,
1575
+ 82,
1576
+ 83,
1577
+ 84,
1578
+ 91,
1579
+ 93,
1580
+ 19,
1581
+ 20,
1582
+ 21,
1583
+ 18,
1584
+ 19,
1585
+ 79,
1586
+ 80,
1587
+ 81,
1588
+ 82,
1589
+ 83,
1590
+ 84,
1591
+ 85,
1592
+ 90,
1593
+ 91,
1594
+ 92,
1595
+ 94,
1596
+ 18,
1597
+ 19,
1598
+ 79,
1599
+ 80,
1600
+ 15,
1601
+ 16,
1602
+ 17,
1603
+ 78,
1604
+ 79,
1605
+ 80,
1606
+ 83,
1607
+ 84,
1608
+ 85,
1609
+ 86,
1610
+ 87,
1611
+ 89,
1612
+ 90,
1613
+ 91,
1614
+ 93,
1615
+ 95,
1616
+ 15,
1617
+ 13,
1618
+ 14,
1619
+ 15,
1620
+ 76,
1621
+ 77,
1622
+ 78,
1623
+ 85,
1624
+ 86,
1625
+ 87,
1626
+ 88,
1627
+ 89,
1628
+ 90,
1629
+ 94,
1630
+ 13,
1631
+ 14,
1632
+ 15,
1633
+ 76,
1634
+ 34,
1635
+ 35,
1636
+ 36,
1637
+ 38,
1638
+ 39,
1639
+ 40,
1640
+ 41,
1641
+ 60,
1642
+ 61,
1643
+ 62,
1644
+ 63,
1645
+ 64,
1646
+ 65,
1647
+ 66,
1648
+ 67,
1649
+ 34,
1650
+ 35,
1651
+ 43,
1652
+ 44,
1653
+ 45,
1654
+ 47,
1655
+ 48,
1656
+ 49,
1657
+ 50,
1658
+ 68,
1659
+ 69,
1660
+ 70,
1661
+ 71,
1662
+ 72,
1663
+ 73,
1664
+ 74,
1665
+ 75,
1666
+ 43,
1667
+ 44,
1668
+ ]
1669
+
1670
+
1671
+ ri2 = [
1672
+ 0,
1673
+ 2,
1674
+ 4,
1675
+ 6,
1676
+ 8,
1677
+ 4,
1678
+ 0,
1679
+ 2,
1680
+ 4,
1681
+ 6,
1682
+ 8,
1683
+ 4,
1684
+ 0,
1685
+ 2,
1686
+ 4,
1687
+ 6,
1688
+ 8,
1689
+ 0,
1690
+ 0,
1691
+ 2,
1692
+ 4, 6, 8, 8, 0, 0, 2, 4, 6, 8, 8, 0, 0, 2, 1, 1, 0, 2, 4, 6, 1, 1, 0, 2, 4, 6, 1, 1, 0, 2,
+ 4, 3, 2, 1, 0, 2, 4, 6, 3, 2, 1, 0, 2, 4, 6, 3, 2, 1, 6, 3, 3, 1, 0, 2, 4, 7, 6, 3, 3, 1,
+ 0, 2, 4, 7, 6, 6, 4, 3, 1, 0, 2, 4, 8, 6, 4, 3, 1, 0, 2, 4, 8, 6, 7, 5, 3, 1, 0, 2, 4, 9,
+ 7, 5, 3, 1, 0, 2, 4, 9, 7, 6, 5, 3, 1, 0, 2, 4, 6, 5, 3, 1, 0, 2, 4, 6, 5, 3, 7, 5, 3, 1,
+ 0, 2, 4, 7, 5, 3, 1, 0, 2, 4, 7, 5, 3, 9, 7, 5, 3, 1, 0, 2, 5, 9, 7, 5, 3, 1, 0, 2, 5, 9,
+ 9, 7, 5, 3, 1, 0, 2, 5, 8, 9, 7, 5, 3, 1, 0, 2, 5, 7, 5, 3, 1, 0, 2, 5, 9, 9, 7, 5, 3, 1,
+ 0, 2, 5, 9, 9, 5, 3, 1, 0, 2, 4, 9, 5, 3, 1, 0, 2, 4, 9, 5, 3, 6, 3, 1, 0, 2, 6, 6, 3, 1,
+ 0, 2, 6, 6, 3, 1, 0, 2, 7, 3, 1, 0, 3, 7, 7, 3, 1, 0, 3, 7, 7, 3, 1, 0, 3, 6, 3, 1, 1, 3,
+ 6, 6, 3, 1, 1, 3, 6, 6, 3, 1, 1, 3, 7, 3, 1, 1, 3, 7, 7, 3, 1, 1, 3, 7, 7, 3, 1, 1, 3, 6,
+ 3, 0, 1, 3, 6, 6, 3, 0, 1, 3, 6, 6, 3, 0, 1, 3, 7, 2, 0, 1, 3, 5, 7, 2, 0, 1, 3, 5, 7, 2,
+ 0, 1, 3, 5, 2, 0, 1, 3, 5, 5, 2, 0, 1, 3, 5, 5, 2, 0, 1, 3, 4, 2, 0, 1, 3, 5, 8, 4, 2, 0,
+ 1, 3, 5, 8, 4, 2, 0, 5, 2, 0, 1, 3, 5, 7, 9, 5, 2, 0, 1, 3, 5, 7, 9, 5, 4, 2, 0, 1, 3, 5,
+ 7, 9, 4, 2, 0, 1, 3, 5, 7, 9, 4, 4, 2, 0, 1, 3, 5, 7, 9, 4, 2, 0, 1, 3, 5, 7, 9, 4, 4, 2,
+ 0, 1, 3, 5, 7, 4, 2, 0, 1, 3, 5, 7, 4, 2, 0, 9, 4, 2, 0, 1, 3, 5, 6, 9, 4, 2, 0, 1, 3, 5,
+ 6, 9, 9, 4, 2, 0, 1, 3, 5, 6, 9, 4, 2, 0, 1, 3, 5, 6, 9, 8, 4, 2, 0, 1, 3, 4, 6, 8, 4, 2,
+ 0, 1, 3, 4, 6, 8, 6, 4, 2, 0, 1, 3, 3, 5, 6, 4, 2, 0, 1, 3, 3, 5, 6, 6, 4, 2, 0, 1, 2, 3,
+ 6, 4, 2, 0, 1, 2, 3, 6, 4, 2, 6, 4, 2, 0, 1, 1, 6, 4, 2, 0, 1, 1, 6, 4, 2, 0, 1, 8, 6, 4,
+ 2, 0, 0, 9, 8, 6, 4, 2, 0, 0, 9, 8, 6, 4, 8, 6, 4, 2, 0, 6, 8, 6, 4, 2, 0, 6, 8, 6, 4, 2,
+ 0, 2, 4, 5, 8, 3, 1, 6, 2, 4, 5, 8, 3, 1, 6, 2, 4, 5, 7, 1, 1, 5, 0, 8, 7, 1, 1, 5, 0, 8,
+ 7, 1, 1, 5, 0, 7, 1, 2, 8, 6, 0, 5, 9, 8, 8, 7, 1, 2, 8, 6, 0, 5, 8, 2, 1, 4, 0, 6, 7, 9,
+ 8, 2, 1, 4, 0, 6, 7, 9, 8, 1, 0, 5, 5, 7, 1, 0, 5, 5, 7, 1, 0, 5, 5, 7, 1, 0, 4, 0, 2, 2,
+ 6, 6, 2, 8, 4, 0, 2, 2, 6, 6, 2, 8, 4, 4, 0, 2, 1, 4, 7, 4, 4, 5, 9, 9, 7, 4, 0, 2, 1, 4,
+ 5, 2, 0, 3, 9, 9, 4, 2, 7, 5, 4, 8, 9, 8, 6, 6, 5, 5, 7, 9, 0, 0, 3, 3, 2, 6, 7, 5, 7, 9,
+ 0, 0, 3, 3, 2, 5, 0, 6, 7, 2, 5, 0, 6, 7, 2, 5, 0, 6, 7, 2, 5, 1, 1, 8, 5, 0, 4, 9, 7, 1,
+ 1, 8, 5, 0, 4, 9, 7, 1, 8, 1, 1, 7, 4, 0, 6, 9, 8, 1, 1, 7, 4, 0, 6, 9, 8, 7, 2, 1, 0, 6,
+ 9, 8, 9, 7, 2, 1, 0, 6, 9, 8, 9, 7, 8, 5, 4, 2, 2, 1, 6, 8, 5, 4, 2, 2, 1, 6, 8, 5, 4, 9,
+ 7, 6, 3, 0, 0, 3, 6, 2, 7, 9, 7, 6, 3, 0, 0, 3, 7, 3, 0, 3, 5, 2, 2, 9, 8, 4, 5, 7, 6, 7,
+ 9, 6, 7, 2, 0, 4, 2, 1, 3, 2, 7, 9, 5, 8, 2, 0, 4, 2, 1, 3, 0, 4, 3, 1, 5, 2, 6, 8, 0, 4,
+ 3, 1, 5, 2, 6, 8, 0, 5, 6, 5, 5, 1, 5, 8, 8, 5, 6, 5, 5, 1, 5, 8, 8, 5, 0, 1, 9, 0, 1, 9,
+ 0, 1, 9, 0, 1, 9, 0, 1, 9, 0, 1, 7, 0, 1, 9, 9, 9, 9, 7, 0, 1, 9, 9, 9, 9, 7, 0, 1, 4, 0,
+ 5, 2, 0, 2, 4, 4, 0, 5, 2, 0, 2, 4, 4, 0, 5, 6, 5, 0, 8, 6, 6, 9, 6, 6, 5, 0, 8, 6, 6, 9,
+ 6, 6, 3, 2, 0, 2, 7, 7, 5, 7, 8, 3, 2, 0, 2, 7, 7, 5, 7, 2, 0, 2, 1, 1, 2, 4, 3, 5, 7, 2,
+ 0, 2, 1, 1, 2, 4, 4, 3, 7, 1, 0, 5, 4, 8, 8, 8, 4, 3, 7, 1, 0, 5, 4, 7, 4, 7, 0, 9, 6, 6,
+ 6, 7, 4, 7, 0, 9, 6, 6, 6, 7, 4, 5, 6, 7, 8, 2, 5, 4, 1, 9, 6, 1, 9, 4, 5, 6, 7, 8, 9, 3,
+ 4, 6, 2, 3, 1, 2, 9, 7, 4, 0, 5, 8, 9, 3, 9, 6, 5, 6, 7, 7, 3, 1, 7, 4, 2, 3, 6, 4, 1, 4,
+ 0, 8, 5, 3, 3, 1, 8, 8, 9, 7, 3, 1, 0, 5, 8, 3, 8, 5, 8, 4, 2, 8, 4, 3, 9, 1, 1, 7, 8, 8,
+ 4, 2, 8, 4, 3, 9, 6, 5, 9, 7, 9, 6, 0, 0, 3, 5, 2, 9, 6, 5, 9, 7, 9, 3, 4, 1, 5, 5, 3, 2,
+ 1, 9, 3, 4, 1, 5, 5, 3, 2, 9, 8, 8, 9, 6, 7, 9, 9, 6, 0, 0, 5, 6, 2, 4, 9, 8, 4, 8, 8, 2,
+ 3, 2, 8, 1, 8, 1, 9, 4, 8, 8, 2, 3, 2, 3, 5, 8, 8, 1, 3, 9, 0, 3, 7, 8, 5, 0, 5, 3, 5, 8,
+ 9, 6, 5, 6, 8, 6, 1, 4, 7, 6, 4, 2, 5, 4, 2, 4, 0, 9, 8, 6, 4, 3, 3, 4, 9, 1, 1, 0, 4, 7,
+ 2, 9, 8, 6, 8, 7, 7, 5, 4, 5, 2, 5, 8, 1, 1, 6, 7, 8, 7, 7, 5, 9, 8, 8, 9, 9, 7, 4, 7, 9,
+ 5, 0, 0, 1, 6, 3, 9, 8, 9, 5, 5, 2, 4, 3, 2, 3, 1, 9, 5, 5, 2, 4, 3, 2, 3, 6, 9, 9, 6, 8,
+ 1, 0, 6, 8, 9, 5, 3, 4, 6, 9, 9, 6, 9, 8, 6, 6, 5, 6, 7, 8, 4, 2, 0, 8, 7, 9, 8, 6, 6, 1,
+ 5, 2, 7, 5, 3, 2, 0, 3, 1, 5, 2, 7, 5, 3, 2, 0, 7, 4, 3, 4, 9, 7, 5, 1, 3, 7, 7, 6, 7, 2,
+ 2, 3, 4, 6, 7, 4, 3, 4, 6, 9, 0, 0, 9, 9, 6, 9, 7, 0, 7, 2, 8, 5, 3, 3, 3, 2, 5, 7, 6, 7,
+ 8, 3, 2, 7, 4, 4, 8, 5, 1, 6, 2, 3, 5, 0, 2, 3, 5, 1, 6, 2, 3, 5, 0, 2, 7, 6, 6, 6, 7, 8,
+ 9, 8, 4, 2, 8, 0, 8, 7, 6, 6, 6, 8, 7, 6, 5, 7, 8, 9, 3, 1, 1, 3, 1, 2, 8, 7, 6, 5, 7, 5,
+ 4, 5, 9, 7, 5, 5, 1, 4, 5, 1, 5, 7, 5, 4, 5, 8, 5, 4, 6, 8, 8, 2, 2, 8, 4, 9, 0, 9, 8, 5,
+ 4, 6, 9, 8, 4, 4, 6, 8, 5, 8, 2, 5, 5, 4, 6, 1, 9, 8, 4, 9, 8, 5, 4, 6, 7, 1, 3, 1, 1, 3,
+ 2, 9, 8, 5, 4, 6, 9, 8, 7, 7, 8, 9, 9, 6, 0, 2, 8, 1, 5, 5, 9, 8, 7, 3, 6, 3, 0, 2, 8, 3,
+ 4, 3, 6, 0, 3, 6, 3, 0, 2, 8, 8, 6, 8, 1, 0, 1, 9, 6, 3, 6, 9, 6, 6, 9, 7, 1, 8, 6, 5, 6,
+ 2, 0, 3, 4, 3, 9, 5, 3, 0, 9, 6, 5, 6, 2, 9, 8, 8, 7, 7, 9, 9, 7, 2, 0, 1, 8, 5, 5, 9, 8,
+ 8, 9, 8, 9, 8, 1, 4, 0, 0, 4, 8, 1, 4, 7, 9, 8, 9, 8, 8, 9, 9, 6, 4, 7, 7, 4, 0, 4, 7, 9,
+ 1, 9, 6, 6, 8, 8, 9, 9, 4, 1, 8, 5, 0, 0, 4, 1, 9, 8, 8, 9, 9, 4, 9, 7, 7, 8, 7, 7, 8, 5,
+ 3, 0, 2, 3, 2, 0, 3, 9, 7, 7, 7, 9, 8, 7, 7, 8, 4, 3, 0, 3, 4, 3, 0, 2, 7, 7,
+ ]
third_party/PIPNet/run_demo.sh ADDED
@@ -0,0 +1,11 @@
+ # image
+ python lib/demo.py experiments/WFLW/pip_32_16_60_r18_l2_l1_10_1_nb10.py images/1.jpg
+ #python lib/demo.py experiments/data_300W_CELEBA/pip_32_16_60_r18_l2_l1_10_1_nb10_wcc.py images/2.jpg
+
+ # video
+ #python lib/demo_video.py experiments/WFLW/pip_32_16_60_r18_l2_l1_10_1_nb10.py videos/002.avi
+ #python lib/demo_video.py experiments/data_300W_CELEBA/pip_32_16_60_r18_l2_l1_10_1_nb10_wcc.py videos/007.avi
+
+ # camera
+ #python lib/demo_video.py experiments/WFLW/pip_32_16_60_r18_l2_l1_10_1_nb10.py camera
+
third_party/PIPNet/run_test.sh ADDED
@@ -0,0 +1,34 @@
+ # supervised learning
+
+ # 300W, resnet18
+ #python lib/test.py experiments/data_300W/pip_32_16_60_r18_l2_l1_10_1_nb10.py test.txt images_test
+ # 300W, resnet101
+ #python lib/test.py experiments/data_300W/pip_32_16_60_r101_l2_l1_10_1_nb10.py test.txt images_test
+
+ # COFW, resnet18
+ #python lib/test.py experiments/COFW/pip_32_16_60_r18_l2_l1_10_1_nb10.py test.txt images_test
+ # COFW, resnet101
+ #python lib/test.py experiments/COFW/pip_32_16_60_r101_l2_l1_10_1_nb10.py test.txt images_test
+
+ # WFLW, resnet18
+ #python lib/test.py experiments/WFLW/pip_32_16_60_r18_l2_l1_10_1_nb10.py test.txt images_test
+ # WFLW, resnet101
+ #python lib/test.py experiments/WFLW/pip_32_16_60_r101_l2_l1_10_1_nb10.py test.txt images_test
+
+ # AFLW, resnet18
+ #python lib/test.py experiments/AFLW/pip_32_16_60_r18_l2_l1_10_1_nb10.py test.txt images_test
+ # AFLW, resnet101
+ #python lib/test.py experiments/AFLW/pip_32_16_60_r101_l2_l1_10_1_nb10.py test.txt images_test
+
+ ######################################################################################
+ # GSSL
+
+ # 300W + COFW_68 (unlabeled) + WFLW_68 (unlabeled), resnet18, with curriculum
+ #python lib/test.py experiments/data_300W_COFW_WFLW/pip_32_16_60_r18_l2_l1_10_1_nb10_wcc.py test_300W.txt images_test_300W
+ #python lib/test.py experiments/data_300W_COFW_WFLW/pip_32_16_60_r18_l2_l1_10_1_nb10_wcc.py test_COFW.txt images_test_COFW
+ #python lib/test.py experiments/data_300W_COFW_WFLW/pip_32_16_60_r18_l2_l1_10_1_nb10_wcc.py test_WFLW.txt images_test_WFLW
+
+ # 300W + CelebA (unlabeled), resnet18, with curriculum
+ #python lib/test.py experiments/data_300W_CELEBA/pip_32_16_60_r18_l2_l1_10_1_nb10_wcc.py test_300W.txt images_test_300W
+ #python lib/test.py experiments/data_300W_CELEBA/pip_32_16_60_r18_l2_l1_10_1_nb10_wcc.py test_COFW.txt images_test_COFW
+ #python lib/test.py experiments/data_300W_CELEBA/pip_32_16_60_r18_l2_l1_10_1_nb10_wcc.py test_WFLW.txt images_test_WFLW
third_party/PIPNet/run_train.sh ADDED
@@ -0,0 +1,33 @@
+ ######################################################################################
+ # supervised learning
+
+ # 300W, resnet18
+ #python lib/train.py experiments/data_300W/pip_32_16_60_r18_l2_l1_10_1_nb10.py
+ # 300W, resnet101
+ #python lib/train.py experiments/data_300W/pip_32_16_60_r101_l2_l1_10_1_nb10.py
+
+ # COFW, resnet18
+ #python lib/train.py experiments/COFW/pip_32_16_60_r18_l2_l1_10_1_nb10.py
+ # COFW, resnet101
+ #python lib/train.py experiments/COFW/pip_32_16_60_r101_l2_l1_10_1_nb10.py
+
+ # WFLW, resnet18
+ #python lib/train.py experiments/WFLW/pip_32_16_60_r18_l2_l1_10_1_nb10.py
+ # WFLW, resnet101
+ #python lib/train.py experiments/WFLW/pip_32_16_60_r101_l2_l1_10_1_nb10.py
+
+ # AFLW, resnet18
+ #python lib/train.py experiments/AFLW/pip_32_16_60_r18_l2_l1_10_1_nb10.py
+ # AFLW, resnet101
+ #python lib/train.py experiments/AFLW/pip_32_16_60_r101_l2_l1_10_1_nb10.py
+
+ ######################################################################################
+ # GSSL
+
+ # 300W + COFW_68 (unlabeled) + WFLW_68 (unlabeled), resnet18, with curriculum
+ #python lib/train_gssl.py experiments/data_300W_COFW_WFLW/pip_32_16_60_r18_l2_l1_10_1_nb10_wcc.py
+
+ # 300W + CelebA (unlabeled), resnet18, with curriculum
+ #nohup python lib/train_gssl.py experiments/data_300W_CELEBA/pip_32_16_60_r18_l2_l1_10_1_nb10_wcc.py &
+
+
weights/PIPNet/FaceBoxesV2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aae07fec4b62ac655508c06336662538803407852312ca5009fd93fb487d8cd7
+ size 4153573
weights/PIPNet/epoch59.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63ad4b1598d8933da1fcf9170cbe4b624660bc4d42debce42ac44e90772cbba0
+ size 189011113
weights/arcface/mouth_net_28_56_84_112.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be0e327057e3dcf1d676e3a186f7059df4d8f1ea9d9dab71a76c74823a8b51bf
+ size 127486882
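
The three .pth entries above are Git LFS pointer files, not the network weights themselves; after a `git lfs pull` the real binaries land at those paths and can be verified against the `oid sha256:` value recorded in each pointer. A small sketch of that check, using the path and digest from the epoch59.pth pointer above:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file through SHA-256 so multi-hundred-MB weights
    # never need to fit in memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected digest copied from the LFS pointer for weights/PIPNet/epoch59.pth.
expected = "63ad4b1598d8933da1fcf9170cbe4b624660bc4d42debce42ac44e90772cbba0"
assert sha256_of("weights/PIPNet/epoch59.pth") == expected, "LFS object mismatch"
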