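# Gradio demo app for GeoWizard (https://github.com/lemonaddie/geowizard):
# joint depth and surface-normal estimation from a single image.
# The script clones the upstream repo at startup, loads the published
# checkpoint, and serves an interactive UI.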
import os
import shutil
import sys

import fire
import git
import gradio as gr
import torch
from PIL import Image

import spaces
REPO_URL = "https://github.com/lemonaddie/geowizard.git"
CHECKPOINT = "lemonaddie/Geowizard"
REPO_DIR = "geowizard"

# Clone a fresh copy of the GeoWizard repo so its pipeline module can be imported.
if os.path.isdir(REPO_DIR):
    shutil.rmtree(REPO_DIR)
git.Repo.clone_from(REPO_URL, REPO_DIR)
sys.path.append(os.path.join(os.getcwd(), REPO_DIR))
from pipeline.depth_normal_pipeline_clip_cfg import DepthNormalEstimationPipeline

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
pipe = DepthNormalEstimationPipeline.from_pretrained(CHECKPOINT)

try:
    import xformers  # noqa: F401
    pipe.enable_xformers_memory_efficient_attention()
except ImportError:
    pass  # xformers is optional; fall back to standard attention
pipe = pipe.to(device)
@spaces.GPU
def depth_normal(img,
                 denoising_steps,
                 ensemble_size,
                 processing_res,
                 guidance_scale,
                 domain):
    # processing_res == 0 means "native resolution": skip the resize,
    # since resizing to (0, 0) would fail.
    if processing_res > 0:
        img = img.resize((processing_res, processing_res), Image.Resampling.LANCZOS)
    pipe_out = pipe(
        img,
        denoising_steps=denoising_steps,
        ensemble_size=ensemble_size,
        processing_res=processing_res,
        batch_size=0,  # 0 lets the pipeline choose the batch size
        guidance_scale=guidance_scale,
        domain=domain,
        show_progress_bar=True,
    )
    depth_colored = pipe_out.depth_colored
    normal_colored = pipe_out.normal_colored
    return depth_colored, normal_colored
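# Standalone usage sketch (no Gradio): the defaults below mirror the UI
# defaults further down; "example.jpg" is a hypothetical input path.
#
#   image = Image.open("example.jpg").convert("RGBA")
#   depth_img, normal_img = depth_normal(
#       image, denoising_steps=10, ensemble_size=1,
#       processing_res=768, guidance_scale=3, domain="indoor")
#   depth_img.save("depth.png")
#   normal_img.save("normal.png")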
_TITLE = 'GeoWizard'  # placeholder: the original file never defined _TITLE
_DESCRIPTION = 'Joint depth and surface-normal estimation from a single image.'  # placeholder


def run_demo():
    custom_theme = gr.themes.Soft(primary_hue="blue").set(
        button_secondary_background_fill="*neutral_100",
        button_secondary_background_fill_hover="*neutral_200")
    custom_css = '''#disp_image {
        text-align: center; /* Horizontally center the content */
    }'''

    with gr.Blocks(title=_TITLE, theme=custom_theme, css=custom_css) as demo:
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown('# ' + _TITLE)
                gr.Markdown(_DESCRIPTION)
        with gr.Row(variant='panel'):
            with gr.Column(scale=1):
                # Gradio 4 removed the old 'tool' argument, so it is not passed here.
                input_image = gr.Image(type='pil', image_mode='RGBA', height=320, label='Input image')
                example_folder = os.path.join(os.path.dirname(__file__), "files")
                example_fns = [os.path.join(example_folder, example) for example in os.listdir(example_folder)]
                gr.Examples(
                    examples=example_fns,
                    inputs=[input_image],
                    cache_examples=False,
                    label='Examples (click one of the images below to start)',
                    examples_per_page=30,
                )
            with gr.Column(scale=1):
                with gr.Accordion('Advanced options', open=True):
                    with gr.Row():
                        domain = gr.Radio(
                            [
                                ("Outdoor", "outdoor"),
                                ("Indoor", "indoor"),
                                ("Object", "object"),
                            ],
                            label="Data domain",
                            value="indoor",
                        )
                        guidance_scale = gr.Slider(
                            label="Classifier-free guidance scale",
                            minimum=1,
                            maximum=5,
                            step=1,
                            value=3,
                        )
                        denoise_steps = gr.Slider(
                            label="Number of denoising steps",
                            minimum=1,
                            maximum=20,
                            step=1,
                            value=10,
                        )
                        ensemble_size = gr.Slider(
                            label="Ensemble size",
                            minimum=1,
                            maximum=15,
                            step=1,
                            value=1,
                        )
                        processing_res = gr.Radio(
                            [
                                ("Native", 0),
                                ("Recommended", 768),
                            ],
                            label="Processing resolution",
                            value=768,
                        )
                run_btn = gr.Button('Generate', variant='primary', interactive=True)

        with gr.Row():
            depth = gr.Image(interactive=False, height=384, show_label=False)
        with gr.Row():
            normal = gr.Image(interactive=False, height=384, show_label=False)

        # gr.Button exposes 'click', not 'success'; the slider variable is
        # denoise_steps, and depth_normal can be passed directly without partial.
        run_btn.click(fn=depth_normal,
                      inputs=[input_image, denoise_steps,
                              ensemble_size,
                              processing_res,
                              guidance_scale,
                              domain],
                      outputs=[depth, normal])

    demo.queue().launch(share=True, max_threads=80)
if __name__ == '__main__':
    fire.Fire(run_demo)
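# Launch locally with:  python app.py
# fire.Fire exposes run_demo on the command line, so any keyword arguments
# added to run_demo later become CLI flags automatically.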