Spaces: Running on Zero
Upload folder using huggingface_hub
app.py CHANGED

@@ -3,9 +3,9 @@ from diffusers.utils import load_image
 import spaces
 import torch
 from panna import Depth2Image, DepthAnythingV2
+from panna.pipeline import PipelineDepth2ImageV2
 
-
-model_image = Depth2Image("stabilityai/stable-diffusion-2-depth")
+model = PipelineDepth2ImageV2(torch_dtype=None, variant=None)
 title = ("# [Depth2Image](https://huggingface.co/stabilityai/stable-diffusion-2-depth) with [DepthAnythingV2](https://huggingface.co/depth-anything/Depth-Anything-V2-Large-hf)\n"
          "Depth2Image with depth map predicted by DepthAnything V2. The demo is part of [panna](https://github.com/abacws-abacus/panna) project.")
 example_files = []
@@ -16,18 +16,16 @@ for n in range(1, 10):
 
 @spaces.GPU
 def infer(init_image, prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps):
-
-
-
-
-        prompt=[prompt],
-        negative_prompt=[negative_prompt],
+    return model(
+        init_image,
+        prompt=prompt,
+        negative_prompt=negative_prompt,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         height=height,
         width=width,
         seed=seed
-        )
+    )
 
 
 with gr.Blocks() as demo:
@@ -54,4 +52,4 @@ with gr.Blocks() as demo:
         inputs=[init_image, prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps],
         outputs=[result]
    )
-demo.launch()
+demo.launch(server_name="0.0.0.0")
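For context, the hunks above reassemble into roughly the following post-commit app.py. Only the imports, the model setup, the infer function, and the launch call are taken from the diff itself; the example-file loop is omitted, and the gr.Blocks layout below is a hypothetical minimal stand-in for the UI lines that the diff does not show, so component names and slider ranges are assumptions rather than the Space's actual code.

import gradio as gr
import spaces
import torch
from panna import Depth2Image, DepthAnythingV2  # still imported, though unused in the lines shown
from panna.pipeline import PipelineDepth2ImageV2

# One pipeline object replaces the separate Depth2Image / DepthAnythingV2 models.
model = PipelineDepth2ImageV2(torch_dtype=None, variant=None)

title = ("# [Depth2Image](https://huggingface.co/stabilityai/stable-diffusion-2-depth) with [DepthAnythingV2](https://huggingface.co/depth-anything/Depth-Anything-V2-Large-hf)\n"
         "Depth2Image with depth map predicted by DepthAnything V2. The demo is part of [panna](https://github.com/abacws-abacus/panna) project.")


@spaces.GPU  # run this function on ZeroGPU hardware for the duration of the call
def infer(init_image, prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps):
    # The pipeline now receives plain strings rather than the single-element
    # lists ([prompt], [negative_prompt]) used before this commit.
    return model(
        init_image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        height=height,
        width=width,
        seed=seed
    )


# Hypothetical minimal UI; the Space's actual gr.Blocks layout is not part of the diff.
with gr.Blocks() as demo:
    gr.Markdown(title)
    init_image = gr.Image(label="Input image", type="pil")
    prompt = gr.Text(label="Prompt")
    negative_prompt = gr.Text(label="Negative prompt")
    seed = gr.Slider(0, 1_000_000, step=1, value=0, label="Seed")
    width = gr.Slider(256, 1024, step=64, value=512, label="Width")
    height = gr.Slider(256, 1024, step=64, value=512, label="Height")
    guidance_scale = gr.Slider(0, 20, step=0.5, value=7.5, label="Guidance scale")
    num_inference_steps = gr.Slider(1, 100, step=1, value=25, label="Inference steps")
    run_button = gr.Button("Generate")
    result = gr.Image(label="Result")
    run_button.click(
        fn=infer,
        inputs=[init_image, prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result]
    )

demo.launch(server_name="0.0.0.0")

Passing server_name="0.0.0.0" makes Gradio bind to all network interfaces rather than localhost only, which keeps the app reachable from outside the container it runs in.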