Spaces:
Running
Running
gavinyuan
committed on
Commit
•
1d86bdc
1
Parent(s):
e2d0b84
update: app.py, gpen for video
Browse files
- app.py +19 -9
- inference/utils.py +2 -2
app.py
CHANGED
@@ -215,7 +215,7 @@ def swap_image(
|
|
215 |
os.makedirs(out_path, exist_ok=True)
|
216 |
Image.fromarray(result.astype(np.uint8)).save(os.path.join(out_path, name))
|
217 |
save((result, M, original_target, os.path.join(out_path, "paste_back_" + name), None),
|
218 |
-
trick=trick,
|
219 |
|
220 |
|
221 |
def process_video(
|
@@ -230,6 +230,7 @@ def process_video(
|
|
230 |
frames=9999999,
|
231 |
use_tddfav2=False,
|
232 |
landmark_smooth="kalman",
|
|
|
233 |
):
|
234 |
if isinstance(G, torch.nn.Module):
|
235 |
G.eval()
|
@@ -336,7 +337,7 @@ def process_video(
|
|
336 |
target = output[0][0] * 0.5 + 0.5
|
337 |
else:
|
338 |
target = output[0] * 0.5 + 0.5
|
339 |
-
targets.append(np.array(tensor2pil_transform(target)))
|
340 |
Ms.append(M)
|
341 |
count += 1
|
342 |
if count > frames:
|
@@ -373,7 +374,7 @@ def swap_image_gr(img1, img2, use_post=False, use_gpen=False, ):
|
|
373 |
return out
|
374 |
|
375 |
|
376 |
-
def swap_video_gr(img1, target_path, frames=9999999):
|
377 |
root_dir = make_abs_path("./online_data")
|
378 |
req_id = uuid.uuid1().hex
|
379 |
data_dir = os.path.join(root_dir, req_id)
|
@@ -393,6 +394,7 @@ def swap_video_gr(img1, target_path, frames=9999999):
|
|
393 |
align_target='ffhq',
|
394 |
align_source='ffhq',
|
395 |
use_tddfav2=False,
|
|
|
396 |
)
|
397 |
|
398 |
pool_process = 170
|
@@ -438,6 +440,13 @@ def swap_video_gr(img1, target_path, frames=9999999):
|
|
438 |
return video_save_path
|
439 |
|
440 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
441 |
if __name__ == "__main__":
|
442 |
use_gpu = torch.cuda.is_available()
|
443 |
|
@@ -449,11 +458,11 @@ if __name__ == "__main__":
|
|
449 |
with gr.Column(scale=3):
|
450 |
image1_input = gr.Image(label='source')
|
451 |
image2_input = gr.Image(label='target')
|
452 |
-
|
453 |
-
|
454 |
with gr.Column(scale=2):
|
455 |
image_output = gr.Image()
|
456 |
-
image_button = gr.Button("Run: Face Swapping")
|
457 |
with gr.Tab("Video"):
|
458 |
with gr.Row():
|
459 |
with gr.Column(scale=3):
|
@@ -461,15 +470,16 @@ if __name__ == "__main__":
|
|
461 |
video_input = gr.Video(label='target')
|
462 |
with gr.Column(scale=2):
|
463 |
video_output = gr.Video()
|
464 |
-
|
|
|
465 |
image_button.click(
|
466 |
swap_image_gr,
|
467 |
-
inputs=[image1_input, image2_input,
|
468 |
outputs=image_output,
|
469 |
)
|
470 |
video_button.click(
|
471 |
swap_video_gr,
|
472 |
-
inputs=[image3_input, video_input],
|
473 |
outputs=video_output,
|
474 |
)
|
475 |
|
|
|
215 |
os.makedirs(out_path, exist_ok=True)
|
216 |
Image.fromarray(result.astype(np.uint8)).save(os.path.join(out_path, name))
|
217 |
save((result, M, original_target, os.path.join(out_path, "paste_back_" + name), None),
|
218 |
+
trick=trick, use_gpen=use_gpen)
|
219 |
|
220 |
|
221 |
def process_video(
|
|
|
230 |
frames=9999999,
|
231 |
use_tddfav2=False,
|
232 |
landmark_smooth="kalman",
|
233 |
+
use_gpen=False,
|
234 |
):
|
235 |
if isinstance(G, torch.nn.Module):
|
236 |
G.eval()
|
|
|
337 |
target = output[0][0] * 0.5 + 0.5
|
338 |
else:
|
339 |
target = output[0] * 0.5 + 0.5
|
340 |
+
targets.append(trick.gpen(np.array(tensor2pil_transform(target)), use_gpen=use_gpen))
|
341 |
Ms.append(M)
|
342 |
count += 1
|
343 |
if count > frames:
|
|
|
374 |
return out
|
375 |
|
376 |
|
377 |
+
def swap_video_gr(img1, target_path, use_gpen=False, frames=9999999):
|
378 |
root_dir = make_abs_path("./online_data")
|
379 |
req_id = uuid.uuid1().hex
|
380 |
data_dir = os.path.join(root_dir, req_id)
|
|
|
394 |
align_target='ffhq',
|
395 |
align_source='ffhq',
|
396 |
use_tddfav2=False,
|
397 |
+
use_gpen=use_gpen,
|
398 |
)
|
399 |
|
400 |
pool_process = 170
|
|
|
440 |
return video_save_path
|
441 |
|
442 |
|
443 |
+
css = """
|
444 |
+
#warning {background-color: #FFCCCB}
|
445 |
+
.feedback textarea {font-size: 24px !important}
|
446 |
+
.run_button {background-color: orange}
|
447 |
+
"""
|
448 |
+
|
449 |
+
|
450 |
if __name__ == "__main__":
|
451 |
use_gpu = torch.cuda.is_available()
|
452 |
|
|
|
458 |
with gr.Column(scale=3):
|
459 |
image1_input = gr.Image(label='source')
|
460 |
image2_input = gr.Image(label='target')
|
461 |
+
image_use_post = gr.Checkbox(label="Post-Process")
|
462 |
+
image_use_gpen = gr.Checkbox(label="Super Resolution")
|
463 |
with gr.Column(scale=2):
|
464 |
image_output = gr.Image()
|
465 |
+
image_button = gr.Button("Run: Face Swapping", elem_classes="run_button")
|
466 |
with gr.Tab("Video"):
|
467 |
with gr.Row():
|
468 |
with gr.Column(scale=3):
|
|
|
470 |
video_input = gr.Video(label='target')
|
471 |
with gr.Column(scale=2):
|
472 |
video_output = gr.Video()
|
473 |
+
video_use_gpen = gr.Checkbox(label="Super Resolution")
|
474 |
+
video_button = gr.Button("Run: Face Swapping", elem_classes="run_button")
|
475 |
image_button.click(
|
476 |
swap_image_gr,
|
477 |
+
inputs=[image1_input, image2_input, image_use_post, image_use_gpen],
|
478 |
outputs=image_output,
|
479 |
)
|
480 |
video_button.click(
|
481 |
swap_video_gr,
|
482 |
+
inputs=[image3_input, video_input, video_use_gpen],
|
483 |
outputs=video_output,
|
484 |
)
|
485 |
|
inference/utils.py
CHANGED
@@ -77,7 +77,7 @@ def get_detector(gpu_mode=False):
|
|
77 |
return tddfa, face_boxes
|
78 |
|
79 |
|
80 |
-
def save(x, trick=None, use_post=False):
|
81 |
""" Paste img to ori_img """
|
82 |
img, mat, ori_img, save_path, img_mask = x
|
83 |
if mat is None:
|
@@ -125,7 +125,7 @@ def save(x, trick=None, use_post=False):
|
|
125 |
ori_img = ori_img.astype(np.uint8)
|
126 |
|
127 |
if trick is not None:
|
128 |
-
ori_img = trick.gpen(ori_img, use_post)
|
129 |
|
130 |
Image.fromarray(ori_img).save(save_path)
|
131 |
|
|
|
77 |
return tddfa, face_boxes
|
78 |
|
79 |
|
80 |
+
def save(x, trick=None, use_gpen=False):
|
81 |
""" Paste img to ori_img """
|
82 |
img, mat, ori_img, save_path, img_mask = x
|
83 |
if mat is None:
|
|
|
125 |
ori_img = ori_img.astype(np.uint8)
|
126 |
|
127 |
if trick is not None:
|
128 |
+
ori_img = trick.gpen(ori_img, use_gpen)
|
129 |
|
130 |
Image.fromarray(ori_img).save(save_path)
|
131 |
|