Spaces:
Runtime error
Runtime error
fix
Browse files
app.py
CHANGED
@@ -63,7 +63,7 @@ if not IS_SPACES_ZERO:
|
|
63 |
# pipe.enable_xformers_memory_efficient_attention()
|
64 |
pipe.enable_model_cpu_offload()
|
65 |
pipe.enable_vae_tiling()
|
66 |
-
canny_torch = SobelOperator(device=device)
|
67 |
|
68 |
|
69 |
def pad_image(image):
|
@@ -96,17 +96,20 @@ def predict(
|
|
96 |
strength=1.0,
|
97 |
controlnet_start=0.0,
|
98 |
controlnet_end=1.0,
|
|
|
|
|
99 |
progress=gr.Progress(track_tqdm=True),
|
100 |
):
|
101 |
if IS_SPACES_ZERO:
|
102 |
apply_hidiffusion(pipe)
|
|
|
103 |
if input_image is None:
|
104 |
raise gr.Error("Please upload an image.")
|
105 |
padded_image = pad_image(input_image).resize((1024, 1024)).convert("RGB")
|
106 |
conditioning, pooled = compel([prompt, negative_prompt])
|
107 |
generator = torch.manual_seed(seed)
|
108 |
last_time = time.time()
|
109 |
-
canny_image = canny_torch(padded_image, 0.01, 0.2)
|
110 |
images = pipe(
|
111 |
image=padded_image,
|
112 |
control_image=canny_image,
|
@@ -126,7 +129,7 @@ def predict(
|
|
126 |
eta=1.0,
|
127 |
)
|
128 |
print(f"Time taken: {time.time() - last_time}")
|
129 |
-
return (padded_image, images.images[0])
|
130 |
|
131 |
|
132 |
css = """
|
@@ -145,6 +148,7 @@ with gr.Blocks(css=css) as demo:
|
|
145 |
|
146 |
[HiDiffusion](https://github.com/megvii-research/HiDiffusion) enables higher-resolution image generation.
|
147 |
You can upload an initial image and prompt to generate an enhanced version.
|
|
|
148 |
[Duplicate Space](https://huggingface.co/spaces/radames/Enhance-This-HiDiffusion-SDXL?duplicate=true) to avoid the queue.
|
149 |
|
150 |
<small>
|
@@ -217,11 +221,28 @@ You can upload an initial image and prompt to generate an enhanced version.
|
|
217 |
value=1.0,
|
218 |
label="ControlNet End",
|
219 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
220 |
|
221 |
btn = gr.Button()
|
222 |
with gr.Column(scale=2):
|
223 |
with gr.Group():
|
224 |
image_slider = ImageSlider(position=0.5)
|
|
|
|
|
|
|
225 |
inputs = [
|
226 |
image_input,
|
227 |
prompt,
|
@@ -234,7 +255,7 @@ You can upload an initial image and prompt to generate an enhanced version.
|
|
234 |
controlnet_start,
|
235 |
controlnet_end,
|
236 |
]
|
237 |
-
outputs = [image_slider]
|
238 |
btn.click(lambda x: None, inputs=None, outputs=image_slider).then(
|
239 |
predict, inputs=inputs, outputs=outputs, concurrency_limit=1
|
240 |
)
|
@@ -303,9 +324,9 @@ You can upload an initial image and prompt to generate an enhanced version.
|
|
303 |
],
|
304 |
[
|
305 |
"./examples/huggingface.jpg",
|
306 |
-
"photo realistic huggingface human",
|
307 |
"blurry, ugly, duplicate, poorly drawn, deformed, mosaic, emoji cartoon, drawing, pixelated",
|
308 |
-
|
309 |
0.101,
|
310 |
25.206,
|
311 |
2,
|
|
|
63 |
# pipe.enable_xformers_memory_efficient_attention()
|
64 |
pipe.enable_model_cpu_offload()
|
65 |
pipe.enable_vae_tiling()
|
66 |
+
canny_torch = SobelOperator()
|
67 |
|
68 |
|
69 |
def pad_image(image):
|
|
|
96 |
strength=1.0,
|
97 |
controlnet_start=0.0,
|
98 |
controlnet_end=1.0,
|
99 |
+
canny_low=0.01,
|
100 |
+
canny_high=0.2,
|
101 |
progress=gr.Progress(track_tqdm=True),
|
102 |
):
|
103 |
if IS_SPACES_ZERO:
|
104 |
apply_hidiffusion(pipe)
|
105 |
+
canny_torch.to(device)
|
106 |
if input_image is None:
|
107 |
raise gr.Error("Please upload an image.")
|
108 |
padded_image = pad_image(input_image).resize((1024, 1024)).convert("RGB")
|
109 |
conditioning, pooled = compel([prompt, negative_prompt])
|
110 |
generator = torch.manual_seed(seed)
|
111 |
last_time = time.time()
|
112 |
+
canny_image = canny_torch(padded_image, canny_low, canny_high)
|
113 |
images = pipe(
|
114 |
image=padded_image,
|
115 |
control_image=canny_image,
|
|
|
129 |
eta=1.0,
|
130 |
)
|
131 |
print(f"Time taken: {time.time() - last_time}")
|
132 |
+
return (padded_image, images.images[0]), padded_image, canny_image
|
133 |
|
134 |
|
135 |
css = """
|
|
|
148 |
|
149 |
[HiDiffusion](https://github.com/megvii-research/HiDiffusion) enables higher-resolution image generation.
|
150 |
You can upload an initial image and prompt to generate an enhanced version.
|
151 |
+
SDXL Controlnet [TheMistoAI/MistoLine](https://huggingface.co/TheMistoAI/MistoLine)
|
152 |
[Duplicate Space](https://huggingface.co/spaces/radames/Enhance-This-HiDiffusion-SDXL?duplicate=true) to avoid the queue.
|
153 |
|
154 |
<small>
|
|
|
221 |
value=1.0,
|
222 |
label="ControlNet End",
|
223 |
)
|
224 |
+
canny_low = gr.Slider(
|
225 |
+
minimum=0,
|
226 |
+
maximum=1,
|
227 |
+
step=0.001,
|
228 |
+
value=0.0,
|
229 |
+
label="Canny Low Threshold",
|
230 |
+
)
|
231 |
+
canny_high = gr.Slider(
|
232 |
+
minimum=0.0,
|
233 |
+
maximum=1.0,
|
234 |
+
step=0.001,
|
235 |
+
value=0.2,
|
236 |
+
label="Canny High Threshold",
|
237 |
+
)
|
238 |
|
239 |
btn = gr.Button()
|
240 |
with gr.Column(scale=2):
|
241 |
with gr.Group():
|
242 |
image_slider = ImageSlider(position=0.5)
|
243 |
+
with gr.Row():
|
244 |
+
padded_image = gr.Image(type="pil", label="Padded Image")
|
245 |
+
canny_image = gr.Image(type="pil", label="Canny Image")
|
246 |
inputs = [
|
247 |
image_input,
|
248 |
prompt,
|
|
|
255 |
controlnet_start,
|
256 |
controlnet_end,
|
257 |
]
|
258 |
+
outputs = [image_slider, padded_image, canny_image]
|
259 |
btn.click(lambda x: None, inputs=None, outputs=image_slider).then(
|
260 |
predict, inputs=inputs, outputs=outputs, concurrency_limit=1
|
261 |
)
|
|
|
324 |
],
|
325 |
[
|
326 |
"./examples/huggingface.jpg",
|
327 |
+
"photo realistic huggingface human emoji costume, round, yellow, (human skin)+++ (human texture)+++",
|
328 |
"blurry, ugly, duplicate, poorly drawn, deformed, mosaic, emoji cartoon, drawing, pixelated",
|
329 |
+
12312353423,
|
330 |
0.101,
|
331 |
25.206,
|
332 |
2,
|