Spaces:
Running
Running
Doron Adler
committed on
Commit
•
e042cd1
1
Parent(s):
12a763e
Exif transform
Browse files- Example00007.jpg → Example00006.jpg +0 -0
- app.py +2 -2
- face_detection.py +6 -6
Example00007.jpg → Example00006.jpg
RENAMED
File without changes
|
app.py
CHANGED
@@ -81,8 +81,8 @@ def inference(img):
|
|
81 |
|
82 |
|
83 |
title = "Apocalyptify"
|
84 |
-
description = "How will your face look after the
|
85 |
article = "<p style='text-align: center'><a href='https://github.com/justinpinkney/pixel2style2pixel/tree/nw' target='_blank'>Github Repo</a></p><p style='text-align: center'>samples: <img src='https://hf.space/gradioiframe/Norod78/Apocalyptify/file/Sample00001.jpg' alt='Sample00001'/><img src='https://hf.space/gradioiframe/Norod78/Apocalyptify/file/Sample00002.jpg' alt='Sample00002'/><img src='https://hf.space/gradioiframe/Norod78/Apocalyptify/file/Sample00003.jpg' alt='Sample00003'/><img src='https://hf.space/gradioiframe/Norod78/Apocalyptify/file/Sample00004.jpg' alt='Sample00004'/><img src='https://hf.space/gradioiframe/Norod78/Apocalyptify/file/Sample00005.jpg' alt='Sample00005'/></p><p>The Apocalypse model was fine tuned on a pre-trained Pixel2Style2Pixel model by Doron Adler</p>"
|
86 |
|
87 |
-
examples=[['Example00001.jpg'],['Example00002.jpg'],['Example00003.jpg'],['Example00004.jpg'],['Example00005.jpg']]
|
88 |
gr.Interface(inference, gr.inputs.Image(type="pil",shape=(256,256)), gr.outputs.Image(type="pil"),title=title,description=description,article=article,examples=examples,enable_queue=True).launch()
|
|
|
81 |
|
82 |
|
83 |
title = "Apocalyptify"
|
84 |
+
description = "How will your face look after the Apocalypse? Upload an image with a face, or click one of the examples below. If a face could not be detected, the output box will remain empty."
|
85 |
article = "<p style='text-align: center'><a href='https://github.com/justinpinkney/pixel2style2pixel/tree/nw' target='_blank'>Github Repo</a></p><p style='text-align: center'>samples: <img src='https://hf.space/gradioiframe/Norod78/Apocalyptify/file/Sample00001.jpg' alt='Sample00001'/><img src='https://hf.space/gradioiframe/Norod78/Apocalyptify/file/Sample00002.jpg' alt='Sample00002'/><img src='https://hf.space/gradioiframe/Norod78/Apocalyptify/file/Sample00003.jpg' alt='Sample00003'/><img src='https://hf.space/gradioiframe/Norod78/Apocalyptify/file/Sample00004.jpg' alt='Sample00004'/><img src='https://hf.space/gradioiframe/Norod78/Apocalyptify/file/Sample00005.jpg' alt='Sample00005'/></p><p>The Apocalypse model was fine tuned on a pre-trained Pixel2Style2Pixel model by Doron Adler</p>"
|
86 |
|
87 |
+
examples=[['Example00001.jpg'],['Example00002.jpg'],['Example00003.jpg'],['Example00004.jpg'],['Example00005.jpg'], ['Example00006.jpg']]
|
88 |
gr.Interface(inference, gr.inputs.Image(type="pil",shape=(256,256)), gr.outputs.Image(type="pil"),title=title,description=description,article=article,examples=examples,enable_queue=True).launch()
|
face_detection.py
CHANGED
@@ -14,6 +14,11 @@ detector = dlib.get_frontal_face_detector()
|
|
14 |
|
15 |
|
16 |
def align(image_in, face_index=0, output_size=256):
|
|
|
|
|
|
|
|
|
|
|
17 |
landmarks = list(get_landmarks(image_in))
|
18 |
n_faces = len(landmarks)
|
19 |
face_index = min(n_faces-1, face_index)
|
@@ -85,12 +90,7 @@ def image_align(src_img, face_landmarks, output_size=512, transform_size=2048, e
|
|
85 |
c = eye_avg + eye_to_mouth * em_scale
|
86 |
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
|
87 |
quad_orig = quad.copy()
|
88 |
-
qsize = np.hypot(*x) * 2
|
89 |
-
|
90 |
-
try:
|
91 |
-
src_img = ImageOps.exif_transpose(src_img)
|
92 |
-
except:
|
93 |
-
print("exif problem, not rotating")
|
94 |
|
95 |
img = src_img.convert('RGBA').convert('RGB')
|
96 |
|
|
|
14 |
|
15 |
|
16 |
def align(image_in, face_index=0, output_size=256):
|
17 |
+
try:
|
18 |
+
image_in = ImageOps.exif_transpose(image_in)
|
19 |
+
except:
|
20 |
+
print("exif problem, not rotating")
|
21 |
+
|
22 |
landmarks = list(get_landmarks(image_in))
|
23 |
n_faces = len(landmarks)
|
24 |
face_index = min(n_faces-1, face_index)
|
|
|
90 |
c = eye_avg + eye_to_mouth * em_scale
|
91 |
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
|
92 |
quad_orig = quad.copy()
|
93 |
+
qsize = np.hypot(*x) * 2
|
|
|
|
|
|
|
|
|
|
|
94 |
|
95 |
img = src_img.convert('RGBA').convert('RGB')
|
96 |
|