Delete CodeFormer/inference_codeformer.py

#72
by Mdsohel - opened
Files changed (1)
  1. CodeFormer/inference_codeformer.py +0 -274
CodeFormer/inference_codeformer.py DELETED
@@ -1,274 +0,0 @@
- import os
- import cv2
- import argparse
- import glob
- import torch
- from torchvision.transforms.functional import normalize
- from basicsr.utils import imwrite, img2tensor, tensor2img
- from basicsr.utils.download_util import load_file_from_url
- from basicsr.utils.misc import gpu_is_available, get_device
- from facelib.utils.face_restoration_helper import FaceRestoreHelper
- from facelib.utils.misc import is_gray
-
- from basicsr.utils.registry import ARCH_REGISTRY
-
- pretrain_model_url = {
-     'restoration': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth',
- }
-
- def set_realesrgan():
-     from basicsr.archs.rrdbnet_arch import RRDBNet
-     from basicsr.utils.realesrgan_utils import RealESRGANer
-
-     use_half = False
-     if torch.cuda.is_available(): # set False in CPU/MPS mode
-         no_half_gpu_list = ['1650', '1660'] # set False for GPUs that don't support f16
-         if not True in [gpu in torch.cuda.get_device_name(0) for gpu in no_half_gpu_list]:
-             use_half = True
-
-     model = RRDBNet(
-         num_in_ch=3,
-         num_out_ch=3,
-         num_feat=64,
-         num_block=23,
-         num_grow_ch=32,
-         scale=2,
-     )
-     upsampler = RealESRGANer(
-         scale=2,
-         model_path="https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/RealESRGAN_x2plus.pth",
-         model=model,
-         tile=args.bg_tile,
-         tile_pad=40,
-         pre_pad=0,
-         half=use_half
-     )
-
-     if not gpu_is_available():  # CPU
-         import warnings
-         warnings.warn('Running on CPU now! Make sure your PyTorch version matches your CUDA.'
-                       'The unoptimized RealESRGAN is slow on CPU. '
-                       'If you want to disable it, please remove `--bg_upsampler` and `--face_upsample` in command.',
-                       category=RuntimeWarning)
-     return upsampler
-
- if __name__ == '__main__':
-     # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-     device = get_device()
-     parser = argparse.ArgumentParser()
-
-     parser.add_argument('-i', '--input_path', type=str, default='./inputs/whole_imgs',
-             help='Input image, video or folder. Default: inputs/whole_imgs')
-     parser.add_argument('-o', '--output_path', type=str, default=None,
-             help='Output folder. Default: results/<input_name>_<w>')
-     parser.add_argument('-w', '--fidelity_weight', type=float, default=0.5,
-             help='Balance the quality and fidelity. Default: 0.5')
-     parser.add_argument('-s', '--upscale', type=int, default=2,
-             help='The final upsampling scale of the image. Default: 2')
-     parser.add_argument('--has_aligned', action='store_true', help='Input are cropped and aligned faces. Default: False')
-     parser.add_argument('--only_center_face', action='store_true', help='Only restore the center face. Default: False')
-     parser.add_argument('--draw_box', action='store_true', help='Draw the bounding box for the detected faces. Default: False')
-     # large det_model: 'YOLOv5l', 'retinaface_resnet50'
-     # small det_model: 'YOLOv5n', 'retinaface_mobile0.25'
-     parser.add_argument('--detection_model', type=str, default='retinaface_resnet50',
-             help='Face detector. Optional: retinaface_resnet50, retinaface_mobile0.25, YOLOv5l, YOLOv5n, dlib. \
-                 Default: retinaface_resnet50')
-     parser.add_argument('--bg_upsampler', type=str, default='None', help='Background upsampler. Optional: realesrgan')
-     parser.add_argument('--face_upsample', action='store_true', help='Face upsampler after enhancement. Default: False')
-     parser.add_argument('--bg_tile', type=int, default=400, help='Tile size for background sampler. Default: 400')
-     parser.add_argument('--suffix', type=str, default=None, help='Suffix of the restored faces. Default: None')
-     parser.add_argument('--save_video_fps', type=float, default=None, help='Frame rate for saving video. Default: None')
-
-     args = parser.parse_args()
-
-     # ------------------------ input & output ------------------------
-     w = args.fidelity_weight
-     input_video = False
-     if args.input_path.endswith(('jpg', 'jpeg', 'png', 'JPG', 'JPEG', 'PNG')): # input single img path
-         input_img_list = [args.input_path]
-         result_root = f'results/test_img_{w}'
-     elif args.input_path.endswith(('mp4', 'mov', 'avi', 'MP4', 'MOV', 'AVI')): # input video path
-         from basicsr.utils.video_util import VideoReader, VideoWriter
-         input_img_list = []
-         vidreader = VideoReader(args.input_path)
-         image = vidreader.get_frame()
-         while image is not None:
-             input_img_list.append(image)
-             image = vidreader.get_frame()
-         audio = vidreader.get_audio()
-         fps = vidreader.get_fps() if args.save_video_fps is None else args.save_video_fps
-         video_name = os.path.basename(args.input_path)[:-4]
-         result_root = f'results/{video_name}_{w}'
-         input_video = True
-         vidreader.close()
-     else: # input img folder
-         if args.input_path.endswith('/'):  # solve when path ends with /
-             args.input_path = args.input_path[:-1]
-         # scan all the jpg and png images
-         input_img_list = sorted(glob.glob(os.path.join(args.input_path, '*.[jpJP][pnPN]*[gG]')))
-         result_root = f'results/{os.path.basename(args.input_path)}_{w}'
-
-     if not args.output_path is None: # set output path
-         result_root = args.output_path
-
-     test_img_num = len(input_img_list)
-     if test_img_num == 0:
-         raise FileNotFoundError('No input image/video is found...\n'
-             '\tNote that --input_path for video should end with .mp4|.mov|.avi')
-
-     # ------------------ set up background upsampler ------------------
-     if args.bg_upsampler == 'realesrgan':
-         bg_upsampler = set_realesrgan()
-     else:
-         bg_upsampler = None
-
-     # ------------------ set up face upsampler ------------------
-     if args.face_upsample:
-         if bg_upsampler is not None:
-             face_upsampler = bg_upsampler
-         else:
-             face_upsampler = set_realesrgan()
-     else:
-         face_upsampler = None
-
-     # ------------------ set up CodeFormer restorer -------------------
-     net = ARCH_REGISTRY.get('CodeFormer')(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9,
-                                           connect_list=['32', '64', '128', '256']).to(device)
-
-     # ckpt_path = 'weights/CodeFormer/codeformer.pth'
-     ckpt_path = load_file_from_url(url=pretrain_model_url['restoration'],
-                                    model_dir='weights/CodeFormer', progress=True, file_name=None)
-     checkpoint = torch.load(ckpt_path)['params_ema']
-     net.load_state_dict(checkpoint)
-     net.eval()
-
-     # ------------------ set up FaceRestoreHelper -------------------
-     # large det_model: 'YOLOv5l', 'retinaface_resnet50'
-     # small det_model: 'YOLOv5n', 'retinaface_mobile0.25'
-     if not args.has_aligned:
-         print(f'Face detection model: {args.detection_model}')
-     if bg_upsampler is not None:
-         print(f'Background upsampling: True, Face upsampling: {args.face_upsample}')
-     else:
-         print(f'Background upsampling: False, Face upsampling: {args.face_upsample}')
-
-     face_helper = FaceRestoreHelper(
-         args.upscale,
-         face_size=512,
-         crop_ratio=(1, 1),
-         det_model = args.detection_model,
-         save_ext='png',
-         use_parse=True,
-         device=device)
-
-     # -------------------- start to processing ---------------------
-     for i, img_path in enumerate(input_img_list):
-         # clean all the intermediate results to process the next image
-         face_helper.clean_all()
-
-         if isinstance(img_path, str):
-             img_name = os.path.basename(img_path)
-             basename, ext = os.path.splitext(img_name)
-             print(f'[{i+1}/{test_img_num}] Processing: {img_name}')
-             img = cv2.imread(img_path, cv2.IMREAD_COLOR)
-         else: # for video processing
-             basename = str(i).zfill(6)
-             img_name = f'{video_name}_{basename}' if input_video else basename
-             print(f'[{i+1}/{test_img_num}] Processing: {img_name}')
-             img = img_path
-
-         if args.has_aligned:
-             # the input faces are already cropped and aligned
-             img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR)
-             face_helper.is_gray = is_gray(img, threshold=10)
-             if face_helper.is_gray:
-                 print('Grayscale input: True')
-             face_helper.cropped_faces = [img]
-         else:
-             face_helper.read_image(img)
-             # get face landmarks for each face
-             num_det_faces = face_helper.get_face_landmarks_5(
-                 only_center_face=args.only_center_face, resize=640, eye_dist_threshold=5)
-             print(f'\tdetect {num_det_faces} faces')
-             # align and warp each face
-             face_helper.align_warp_face()
-
-         # face restoration for each cropped face
-         for idx, cropped_face in enumerate(face_helper.cropped_faces):
-             # prepare data
-             cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
-             normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
-             cropped_face_t = cropped_face_t.unsqueeze(0).to(device)
-
-             try:
-                 with torch.no_grad():
-                     output = net(cropped_face_t, w=w, adain=True)[0]
-                     restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
-                 del output
-                 torch.cuda.empty_cache()
-             except Exception as error:
-                 print(f'\tFailed inference for CodeFormer: {error}')
-                 restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))
-
-             restored_face = restored_face.astype('uint8')
-             face_helper.add_restored_face(restored_face, cropped_face)
-
-         # paste_back
-         if not args.has_aligned:
-             # upsample the background
-             if bg_upsampler is not None:
-                 # Now only support RealESRGAN for upsampling background
-                 bg_img = bg_upsampler.enhance(img, outscale=args.upscale)[0]
-             else:
-                 bg_img = None
-             face_helper.get_inverse_affine(None)
-             # paste each restored face to the input image
-             if args.face_upsample and face_upsampler is not None:
-                 restored_img = face_helper.paste_faces_to_input_image(upsample_img=bg_img, draw_box=args.draw_box, face_upsampler=face_upsampler)
-             else:
-                 restored_img = face_helper.paste_faces_to_input_image(upsample_img=bg_img, draw_box=args.draw_box)
-
-         # save faces
-         for idx, (cropped_face, restored_face) in enumerate(zip(face_helper.cropped_faces, face_helper.restored_faces)):
-             # save cropped face
-             if not args.has_aligned:
-                 save_crop_path = os.path.join(result_root, 'cropped_faces', f'{basename}_{idx:02d}.png')
-                 imwrite(cropped_face, save_crop_path)
-             # save restored face
-             if args.has_aligned:
-                 save_face_name = f'{basename}.png'
-             else:
-                 save_face_name = f'{basename}_{idx:02d}.png'
-             if args.suffix is not None:
-                 save_face_name = f'{save_face_name[:-4]}_{args.suffix}.png'
-             save_restore_path = os.path.join(result_root, 'restored_faces', save_face_name)
-             imwrite(restored_face, save_restore_path)
-
-         # save restored img
-         if not args.has_aligned and restored_img is not None:
-             if args.suffix is not None:
-                 basename = f'{basename}_{args.suffix}'
-             save_restore_path = os.path.join(result_root, 'final_results', f'{basename}.png')
-             imwrite(restored_img, save_restore_path)
-
-     # save enhanced video
-     if input_video:
-         print('Video Saving...')
-         # load images
-         video_frames = []
-         img_list = sorted(glob.glob(os.path.join(result_root, 'final_results', '*.[jp][pn]g')))
-         for img_path in img_list:
-             img = cv2.imread(img_path)
-             video_frames.append(img)
-         # write images to video
-         height, width = video_frames[0].shape[:2]
-         if args.suffix is not None:
-             video_name = f'{video_name}_{args.suffix}.png'
-         save_restore_path = os.path.join(result_root, f'{video_name}.mp4')
-         vidwriter = VideoWriter(save_restore_path, height, width, fps, audio)
-
-         for f in video_frames:
-             vidwriter.write_frame(f)
-         vidwriter.close()
-
-     print(f'\nAll results are saved in {result_root}')
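
For reference, the argparse flags defined in the deleted script imply invocations along these lines. This is a sketch reconstructed only from the flags and defaults visible in the diff above; the input paths other than the `./inputs/whole_imgs` default are illustrative, not taken from the script.

# whole images: restore faces and upsample the background with RealESRGAN
python inference_codeformer.py -i ./inputs/whole_imgs -w 0.7 -s 2 --bg_upsampler realesrgan --face_upsample

# already cropped and aligned 512x512 faces (path is hypothetical)
python inference_codeformer.py -i ./inputs/cropped_faces -w 0.5 --has_aligned

# video input; per the script, --input_path must end with .mp4/.mov/.avi (file name is hypothetical)
python inference_codeformer.py -i ./inputs/demo.mp4 -w 0.7 --bg_upsampler realesrgan

The `-w` value trades fidelity against quality (the script's default is 0.5), and results land under `results/<input_name>_<w>` unless `-o` overrides it.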