add pre_face_align option.
app.py CHANGED
@@ -57,6 +57,9 @@ torch.hub.download_url_to_file(
 torch.hub.download_url_to_file(
     'https://replicate.com/api/models/sczhou/codeformer/files/7cf19c2c-e0cf-4712-9af8-cf5bdbb8d0ee/012.jpg',
     '05.jpg')
+torch.hub.download_url_to_file(
+    'https://raw.githubusercontent.com/sczhou/CodeFormer/master/inputs/cropped_faces/0729.png',
+    '06.png')
 
 def imread(img_path):
     img = cv2.imread(img_path)
@@ -101,11 +104,11 @@ codeformer_net.eval()
 
 os.makedirs('output', exist_ok=True)
 
-def inference(image, background_enhance, face_upsample, upscale, codeformer_fidelity):
+def inference(image, face_align, background_enhance, face_upsample, upscale, codeformer_fidelity):
     """Run a single prediction on the model"""
     try: # global try
         # take the default setting for the demo
-        has_aligned = False
+        has_aligned = not face_align
         only_center_face = False
         draw_box = False
         detection_model = "retinaface_resnet50"
@@ -114,6 +117,7 @@ def inference(image, background_enhance, face_upsample, upscale, codeformer_fide
         background_enhance = background_enhance if background_enhance is not None else True
         face_upsample = face_upsample if face_upsample is not None else True
         upscale = upscale if (upscale is not None and upscale > 0) else 2
+        upscale = 1 if has_aligned else upscale
 
         img = cv2.imread(str(image), cv2.IMREAD_COLOR)
         print('\timage size:', img.shape)
@@ -271,6 +275,7 @@ td {
 demo = gr.Interface(
     inference, [
         gr.Image(type="filepath", label="Input"),
+        gr.Checkbox(value=True, label="Pre_Face_Align"),
         gr.Checkbox(value=True, label="Background_Enhance"),
         gr.Checkbox(value=True, label="Face_Upsample"),
         gr.Number(value=2, label="Rescaling_Factor (up to 4)"),
@@ -282,11 +287,12 @@ demo = gr.Interface(
     description=description,
     article=article,
     examples=[
-        ['01.png', True, True, 2, 0.7],
-        ['02.jpg', True, True, 2, 0.7],
-        ['03.jpg', True, True, 2, 0.7],
-        ['04.jpg', True, True, 2, 0.1],
-        ['05.jpg', True, True, 2, 0.1]
+        ['01.png', True, True, True, 2, 0.7],
+        ['02.jpg', True, True, True, 2, 0.7],
+        ['03.jpg', True, True, True, 2, 0.7],
+        ['04.jpg', True, True, True, 2, 0.1],
+        ['05.jpg', True, True, True, 2, 0.1],
+        ['06.png', False, True, True, 1, 0.5]
     ])
 
 DEBUG = os.getenv('DEBUG') == '1'
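
Taken together, the diff wires the new Pre_Face_Align checkbox into inference() as a face_align argument: unchecking it sets has_aligned = True, which tells the pipeline the input is already a cropped, aligned face, and the upscale factor is then forced to 1. The standalone sketch below reproduces only that flag and default handling; resolve_demo_settings is a helper invented here for illustration and is not part of app.py.

# Minimal sketch of the option plumbing added in this commit.
# The actual CodeFormer restoration call is omitted on purpose.

def resolve_demo_settings(face_align, background_enhance, face_upsample, upscale):
    """Mirror how inference() resolves its settings after this change."""
    # Unchecking "Pre_Face_Align" signals a pre-cropped, aligned face,
    # so face detection/alignment is skipped inside the pipeline.
    has_aligned = not face_align

    # Gradio may pass None for unset inputs; fall back to the demo defaults.
    background_enhance = background_enhance if background_enhance is not None else True
    face_upsample = face_upsample if face_upsample is not None else True
    upscale = upscale if (upscale is not None and upscale > 0) else 2

    # Pre-aligned crops are restored at their native size.
    upscale = 1 if has_aligned else upscale
    return has_aligned, background_enhance, face_upsample, upscale


# The added example row ['06.png', False, True, True, 1, 0.5] exercises the
# pre-aligned path with the cropped face downloaded as 06.png.
print(resolve_demo_settings(False, True, True, 1))    # (True, True, True, 1)
print(resolve_demo_settings(True, None, None, None))  # (False, True, True, 2)

Note that every existing example row also gains a leading True so the rows keep matching the longer input list that now starts with the Pre_Face_Align checkbox.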