Spaces: Running on Zero
File size: 2,235 Bytes
import gradio as gr
from diffusers.utils import load_image
import spaces
from panna.pipeline import PipelineDepth2ImageV2
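# Instantiate the panna Depth2Image pipeline (Stable Diffusion 2 depth-to-image conditioned on DepthAnything V2 depth maps).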
model = PipelineDepth2ImageV2()
title = ("# [Depth2Image](https://huggingface.co/stabilityai/stable-diffusion-2-depth) with [DepthAnythingV2](https://huggingface.co/depth-anything/Depth-Anything-V2-Large-hf)\n"
"Depth2Image with depth map predicted by DepthAnything V2. The demo is part of [panna](https://github.com/asahi417/panna) project.")
example_files = []
for n in range(1, 10):
load_image(f"https://huggingface.co/spaces/depth-anything/Depth-Anything-V2/resolve/main/assets/examples/demo{n:0>2}.jpg").save(f"demo{n:0>2}.jpg")
example_files.append(f"demo{n:0>2}.jpg")
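# Run inference on ZeroGPU hardware: @spaces.GPU() requests a GPU only for the duration of this call.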
@spaces.GPU()
def infer(init_image, prompt, negative_prompt, seed, guidance_scale, num_inference_steps):
    return model(
        init_image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        seed=seed
    )
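# Gradio UI: prompt input, image input/output, and advanced generation settings.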
with gr.Blocks() as demo:
    gr.Markdown(title)
    with gr.Row():
        prompt = gr.Text(label="Prompt", show_label=True, max_lines=1, placeholder="Enter your prompt", container=False)
        run_button = gr.Button("Run", scale=0)
    with gr.Row():
        init_image = gr.Image(label="Input Image", type='pil')
        result = gr.Image(label="Result")
    with gr.Accordion("Advanced Settings", open=False):
        negative_prompt = gr.Text(label="Negative Prompt", max_lines=1, placeholder="Enter a negative prompt")
        seed = gr.Slider(label="Seed", minimum=0, maximum=1_000_000, step=1, value=0)
        with gr.Row():
            guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=10.0, step=0.1, value=7.5)
            num_inference_steps = gr.Slider(label="Inference steps", minimum=1, maximum=50, step=1, value=50)
    examples = gr.Examples(examples=example_files, inputs=[init_image])
    gr.on(
        triggers=[run_button.click, prompt.submit, negative_prompt.submit],
        fn=infer,
        inputs=[init_image, prompt, negative_prompt, seed, guidance_scale, num_inference_steps],
        outputs=[result]
    )
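# Bind to all interfaces so the app is reachable from outside the Space container.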
demo.launch(server_name="0.0.0.0")