lemonaddie
committed on
Update app2.py
app2.py
CHANGED
@@ -30,8 +30,6 @@ import cv2
 import sys
 sys.path.append("../")
 from models.depth_normal_pipeline_clip import DepthNormalEstimationPipeline
-#from models.depth_normal_pipeline_clip_cfg import DepthNormalEstimationPipeline
-#from models.depth_normal_pipeline_clip_cfg_1 import DepthNormalEstimationPipeline as DepthNormalEstimationPipelineCFG
 from utils.seed_all import seed_all
 import matplotlib.pyplot as plt
 from utils.de_normalized import align_scale_shift
@@ -46,7 +44,6 @@ import torchvision.transforms.functional as TF
 from torchvision.transforms import InterpolationMode
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-#pipe = DepthNormalEstimationPipeline.from_pretrained(CHECKPOINT)
 
 stable_diffusion_repo_path = '.'
 vae = AutoencoderKL.from_pretrained(stable_diffusion_repo_path, subfolder='vae')
@@ -56,7 +53,6 @@ image_encoder = CLIPVisionModelWithProjection.from_pretrained(sd_image_variation
 feature_extractor = CLIPImageProcessor.from_pretrained(sd_image_variations_diffusers_path, subfolder="feature_extractor")
 
 unet = UNet2DConditionModel.from_pretrained('./wocfg/unet_ema')
-unet_cfg = UNet2DConditionModel.from_pretrained('./cfg/unet_ema')
 
 pipe = DepthNormalEstimationPipeline(vae=vae,
                                      image_encoder=image_encoder,
@@ -64,13 +60,6 @@ pipe = DepthNormalEstimationPipeline(vae=vae,
                                      unet=unet,
                                      scheduler=scheduler)
 
-# pipe_cfg = DepthNormalEstimationPipelineCFG(vae=vae,
-# image_encoder=image_encoder,
-# feature_extractor=feature_extractor,
-# unet=unet_cfg,
-# scheduler=scheduler)
-
-
 try:
     import xformers
     pipe.enable_xformers_memory_efficient_attention()
@@ -78,8 +67,6 @@ except:
     pass # run without xformers
 
 pipe = pipe.to(device)
-#pipe_cfg = pipe_cfg.to(device)
-#run_demo_server(pipe)
 
 @spaces.GPU
 def depth_normal(img,
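
For reference, the model loading and pipeline assembly that this commit leaves in place can be read off the context lines above. The sketch below consolidates them; the scheduler construction, the value of sd_image_variations_diffusers_path, the image-encoder subfolder, and the feature_extractor argument of the live pipe are not visible in this diff and are assumptions.

# Consolidated sketch of the post-commit setup, assembled from the context
# lines of this diff. Lines marked "assumed" do not appear in the diff.
import torch
from diffusers import AutoencoderKL, UNet2DConditionModel, DDIMScheduler
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from models.depth_normal_pipeline_clip import DepthNormalEstimationPipeline

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

stable_diffusion_repo_path = '.'
sd_image_variations_diffusers_path = '.'  # placeholder; real value defined elsewhere in app2.py

vae = AutoencoderKL.from_pretrained(stable_diffusion_repo_path, subfolder='vae')
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    sd_image_variations_diffusers_path, subfolder="image_encoder")  # subfolder assumed
feature_extractor = CLIPImageProcessor.from_pretrained(
    sd_image_variations_diffusers_path, subfolder="feature_extractor")
unet = UNet2DConditionModel.from_pretrained('./wocfg/unet_ema')  # only the no-CFG UNet remains
scheduler = DDIMScheduler.from_pretrained(stable_diffusion_repo_path, subfolder='scheduler')  # assumed

pipe = DepthNormalEstimationPipeline(vae=vae,
                                     image_encoder=image_encoder,
                                     feature_extractor=feature_extractor,
                                     unet=unet,
                                     scheduler=scheduler)

try:
    import xformers
    pipe.enable_xformers_memory_efficient_attention()
except ImportError:
    pass  # run without xformers

pipe = pipe.to(device)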
@@ -93,21 +80,6 @@ def depth_normal(img,
     seed = int(seed)
     torch.manual_seed(seed)
 
-    #img = img.resize((processing_res, processing_res), Image.Resampling.LANCZOS)
-
-    # if guidance_scale > 0:
-    #     pipe_out = pipe_cfg(
-    #         img,
-    #         denoising_steps=denoising_steps,
-    #         ensemble_size=ensemble_size,
-    #         processing_res=processing_res,
-    #         batch_size=0,
-    #         guidance_scale=guidance_scale,
-    #         domain=domain,
-    #         show_progress_bar=True,
-    #     )
-
-    # else:
     pipe_out = pipe(
         img,
         denoising_steps=denoising_steps,
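
With the commented-out guidance branch gone, depth_normal() runs a single guidance-free call into pipe. The diff only shows that call through denoising_steps; the sketch below is hypothetical: its parameter list and the arguments past denoising_steps are inferred from the removed pipe_cfg block (minus guidance_scale and batch_size) and are not visible in the live code.

# Hypothetical sketch of depth_normal() after this cleanup; parameters and the
# trailing pipe() arguments are inferred, not shown by the diff.
import torch
import spaces

@spaces.GPU
def depth_normal(img, denoising_steps, ensemble_size, processing_res, seed, domain):
    seed = int(seed)
    torch.manual_seed(seed)

    # Single code path: the former `if guidance_scale > 0:` branch is gone.
    pipe_out = pipe(
        img,
        denoising_steps=denoising_steps,
        ensemble_size=ensemble_size,
        processing_res=processing_res,
        domain=domain,
        show_progress_bar=True,
    )
    # Post-processing of pipe_out into depth/normal images for the UI is omitted here.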
@@ -178,13 +150,6 @@ def run_demo():
             label="Data Type (Must Select One matches your image)",
             value="indoor",
         )
-        # guidance_scale = gr.Slider(
-        #     label="Classifier Free Guidance Scale, 0 Recommended for no guidance",
-        #     minimum=0,
-        #     maximum=5,
-        #     step=1,
-        #     value=0,
-        # )
         denoising_steps = gr.Slider(
             label="Number of denoising steps (More steps, better quality)",
             minimum=1,
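
The UI change mirrors the backend cleanup: the commented-out classifier-free-guidance slider is dropped, leaving the data-type radio and the denoising-steps slider. The run_demo() body is not shown in this diff, so the wiring below is a hypothetical minimal sketch; only the two label strings and the "indoor" default come from the diff, while the choice list, slider ranges, output widgets, and the argument order passed to depth_normal are illustrative.

# Hypothetical minimal wiring of the inputs shown above; everything not taken
# from the diff (choices, ranges, outputs, argument order) is illustrative.
import gradio as gr

def run_demo_sketch():
    with gr.Blocks() as demo:
        input_image = gr.Image(label="Input image", type="pil")
        domain = gr.Radio(
            ["indoor", "outdoor", "object"],  # illustrative choice list
            label="Data Type (Must Select One matches your image)",
            value="indoor",
        )
        denoising_steps = gr.Slider(
            label="Number of denoising steps (More steps, better quality)",
            minimum=1, maximum=50, step=1, value=10,  # illustrative range/default
        )
        ensemble_size = gr.Slider(label="Ensemble size", minimum=1, maximum=10, step=1, value=3)  # illustrative
        processing_res = gr.Slider(label="Processing resolution", minimum=256, maximum=768, step=64, value=768)  # illustrative
        seed = gr.Number(label="Seed", value=0)
        run_button = gr.Button("Run")
        depth_out = gr.Image(label="Depth")
        normal_out = gr.Image(label="Normal")
        # Assumes depth_normal returns a (depth, normal) image pair and accepts
        # inputs in the order of the hypothetical signature sketched earlier.
        run_button.click(
            fn=depth_normal,
            inputs=[input_image, denoising_steps, ensemble_size, processing_res, seed, domain],
            outputs=[depth_out, normal_out],
        )
    demo.queue().launch()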