Commit • 3d3e65e
Parent(s): 23faa2e
Your Name committed
fix requirements

Files changed:
- app.py (+18 -11)
- requirements.txt (+3 -1)
app.py (CHANGED)

```diff
@@ -1,3 +1,4 @@
+import hashlib
 import os
 from io import BytesIO
 
@@ -5,7 +6,6 @@ import gradio as gr
 import grpc
 from PIL import Image
 from cachetools import LRUCache
-import hashlib
 
 from inference_pb2 import HairSwapRequest, HairSwapResponse
 from inference_pb2_grpc import HairSwapServiceStub
```
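The import hunks show that the app talks to its model backend over gRPC through the generated `HairSwapServiceStub`, exchanging `HairSwapRequest`/`HairSwapResponse` messages (the `hashlib` import is only reordered to the top of the file). A rough sketch of how such a client call is usually wired up (the channel address, the RPC name `swap`, and the request field names are assumptions; none of them appear in this diff):

```python
# Sketch only: the address, RPC name and message fields below are assumed, not taken from the commit.
import grpc

from inference_pb2 import HairSwapRequest
from inference_pb2_grpc import HairSwapServiceStub


def call_backend(face_bytes: bytes, shape_bytes: bytes, color_bytes: bytes) -> bytes:
    with grpc.insecure_channel("localhost:50051") as channel:  # hypothetical endpoint
        stub = HairSwapServiceStub(channel)
        request = HairSwapRequest(face=face_bytes, shape=shape_bytes, color=color_bytes)  # field names assumed
        response = stub.swap(request)  # RPC name assumed
        return response.image  # matches the `output.image` access further down in app.py
```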
```diff
@@ -61,9 +61,13 @@ def resize(name):
     return resize_inner
 
 
-def swap_hair(face, shape, color, blending, poisson_iters, poisson_erosion, prog…
-    if not face…
-…
+def swap_hair(face, shape, color, blending, poisson_iters, poisson_erosion):
+    if not face and not shape and not color:
+        return gr.update(visible=False), gr.update(value="Need to upload a face and at least a shape or color ❗", visible=True)
+    elif not face:
+        return gr.update(visible=False), gr.update(value="Need to upload a face ❗", visible=True)
+    elif not shape and not color:
+        return gr.update(visible=False), gr.update(value="Need to upload at least a shape or color ❗", visible=True)
 
     face_bytes, shape_bytes, color_bytes = map(lambda item: get_bytes(item), (face, shape, color))
 
@@ -81,7 +85,7 @@ def swap_hair(face, shape, color, blending, poisson_iters, poisson_erosion, prog
     )
 
     output = bytes_to_image(output.image)
-    return output
+    return gr.update(value=output, visible=True), gr.update(visible=False)
 
 
 def get_demo():
```
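`swap_hair` serializes the three PIL images with `get_bytes` and converts the gRPC reply back into an image with `bytes_to_image`. Neither helper is part of this diff; a plausible minimal version, assuming a plain PNG round-trip through `BytesIO`, would look like this:

```python
from io import BytesIO

from PIL import Image


def get_bytes(img):
    # Hypothetical helper: serialize a PIL image as PNG bytes, passing None through as empty bytes.
    if img is None:
        return b""
    buf = BytesIO()
    img.save(buf, format="PNG")
    return buf.getvalue()


def bytes_to_image(data: bytes) -> Image.Image:
    # Hypothetical inverse: decode the bytes returned by the gRPC service back into a PIL image.
    return Image.open(BytesIO(data))
```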
```diff
@@ -98,22 +102,24 @@ def get_demo():
         )
         with gr.Row():
             with gr.Column():
-                source = gr.Image(label="…
+                source = gr.Image(label="Source photo to try on the hairstyle", type="pil")
                 with gr.Row():
-                    shape = gr.Image(label="…
-                    color = gr.Image(label="…
+                    shape = gr.Image(label="Shape photo with desired hairstyle (optional)", type="pil")
+                    color = gr.Image(label="Color photo with desired hair color (optional)", type="pil")
                 with gr.Accordion("Advanced Options", open=False):
                     blending = gr.Radio(["Article", "Alternative_v1", "Alternative_v2"], value='Article',
-                                        label="…
+                                        label="Color Encoder version", info="Selects a model for hair color transfer.")
                     poisson_iters = gr.Slider(0, 2500, value=0, step=1, label="Poisson iters",
                                               info="The power of blending with the original image, helps to recover more details. Not included in the article, disabled by default.")
                     poisson_erosion = gr.Slider(1, 100, value=15, step=1, label="Poisson erosion",
                                                 info="Smooths out the blending area.")
                     align = gr.CheckboxGroup(["Face", "Shape", "Color"], value=["Face", "Shape", "Color"],
-                                             label="Image cropping [recommended]",…
+                                             label="Image cropping [recommended]",
+                                             info="Selects which images to crop by face")
                 btn = gr.Button("Get the haircut")
             with gr.Column():
                 output = gr.Image(label="Your result")
+                error_message = gr.Textbox(label="⚠️ Error ⚠️", visible=False, elem_classes="error-message")
 
         gr.Examples(examples=[["input/0.png", "input/1.png", "input/2.png"], ["input/6.png", "input/7.png", None],
                               ["input/10.jpg", None, "input/11.jpg"]],
```
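The only structural addition to the layout is the hidden `error_message` textbox under the result image; `elem_classes="error-message"` exposes it to CSS. The stylesheet itself is not part of this commit, but in Gradio a class like this is typically hooked up through the `css` argument of `gr.Blocks`, roughly like so (the selector and styling below are assumptions):

```python
import gradio as gr

# Hypothetical styling for the "error-message" class; the Space's real CSS is not in this diff.
css = """
.error-message textarea {color: red; font-weight: bold;}
"""

with gr.Blocks(css=css) as demo:
    error_message = gr.Textbox(label="⚠️ Error ⚠️", visible=False, elem_classes="error-message")
```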
````diff
@@ -123,7 +129,8 @@ def get_demo():
         shape.upload(fn=resize('Shape'), inputs=[shape, align], outputs=shape)
         color.upload(fn=resize('Color'), inputs=[color, align], outputs=color)
 
-        btn.click(fn=swap_hair, inputs=[source, shape, color, blending, poisson_iters, poisson_erosion],…
+        btn.click(fn=swap_hair, inputs=[source, shape, color, blending, poisson_iters, poisson_erosion],
+                  outputs=[output, error_message])
 
         gr.Markdown('''To cite the paper by the authors
 ```
````
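With the new wiring, a single click event drives both output components: `swap_hair` returns one `gr.update(...)` per entry in `outputs=[output, error_message]`, showing the result image on success and surfacing the error textbox otherwise. A minimal, self-contained sketch of the same pattern (component names and the `validate` function are illustrative, not from the commit):

```python
import gradio as gr


def validate(face):
    # Same idea as swap_hair: hide the result and show the error when the input is missing.
    if face is None:
        return gr.update(visible=False), gr.update(value="Need to upload a face ❗", visible=True)
    return gr.update(value=face, visible=True), gr.update(visible=False)


with gr.Blocks() as demo:
    face = gr.Image(type="pil")
    btn = gr.Button("Run")
    output = gr.Image(visible=True)
    error_message = gr.Textbox(visible=False)
    # One return value per output component, in the same order as `outputs`.
    btn.click(fn=validate, inputs=face, outputs=[output, error_message])

if __name__ == "__main__":
    demo.launch()
```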
requirements.txt (CHANGED)

```diff
@@ -3,4 +3,6 @@ face_alignment==1.3.4
 addict==2.4.0
 git+https://github.com/openai/CLIP.git
 gdown==3.12.2
-…
+grpcio==1.63.0
+grpcio_tools==1.63.0
+gradio==4.31.5
```
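The three new pins cover the demo's runtime: `gradio` for the UI and `grpcio`/`grpcio_tools` for talking to the inference backend. `grpcio_tools` is also what regenerates the `inference_pb2` and `inference_pb2_grpc` modules imported at the top of `app.py`, typically via something like `python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. inference.proto` (the `inference.proto` filename is assumed; the proto file is not part of this commit).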