Change default num_steps
app.py CHANGED
@@ -9,7 +9,7 @@ import gradio as gr
 import numpy as np
 import PIL.Image
 import torch
-from diffusers import DiffusionPipeline
+from diffusers import AutoencoderKL, DiffusionPipeline
 
 DESCRIPTION = "# SD-XL"
 if not torch.cuda.is_available():
@@ -24,8 +24,10 @@ ENABLE_REFINER = os.getenv("ENABLE_REFINER", "1") == "1"
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 if torch.cuda.is_available():
+    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
     pipe = DiffusionPipeline.from_pretrained(
         "stabilityai/stable-diffusion-xl-base-1.0",
+        vae=vae,
         torch_dtype=torch.float16,
         use_safetensors=True,
         variant="fp16",
@@ -33,6 +35,7 @@ if torch.cuda.is_available():
     if ENABLE_REFINER:
         refiner = DiffusionPipeline.from_pretrained(
             "stabilityai/stable-diffusion-xl-refiner-1.0",
+            vae=vae,
             torch_dtype=torch.float16,
             use_safetensors=True,
             variant="fp16",
@@ -75,8 +78,8 @@ def generate(
     height: int = 1024,
     guidance_scale_base: float = 5.0,
     guidance_scale_refiner: float = 5.0,
-    num_inference_steps_base: int =
-    num_inference_steps_refiner: int =
+    num_inference_steps_base: int = 25,
+    num_inference_steps_refiner: int = 25,
     apply_refiner: bool = False,
 ) -> PIL.Image.Image:
     generator = torch.Generator().manual_seed(seed)
@@ -211,7 +214,7 @@ with gr.Blocks(css="style.css") as demo:
                 minimum=10,
                 maximum=100,
                 step=1,
-                value=
+                value=25,
             )
         with gr.Row(visible=False) as refiner_params:
             guidance_scale_refiner = gr.Slider(
@@ -226,7 +229,7 @@ with gr.Blocks(css="style.css") as demo:
                 minimum=10,
                 maximum=100,
                 step=1,
-                value=
+                value=25,
            )
 
    gr.Examples(
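
Beyond the step-count change named in the commit title, the diff also routes the madebyollin/sdxl-vae-fp16-fix VAE into both the base and refiner pipelines and sets the new inference-step defaults to 25. Below is a minimal standalone sketch of that same pattern, assuming a CUDA GPU; the prompt, seed, and output handling are illustrative and are not taken from the Space's generate() function.

# Sketch: SDXL base (+ refiner) sharing the fp16-fix VAE, run with 25-step defaults.
# Assumes a CUDA GPU; prompt/seed/output handling here is illustrative only.
import torch
from diffusers import AutoencoderKL, DiffusionPipeline

device = torch.device("cuda:0")

# The fp16-fix VAE avoids NaN/black images when decoding SDXL latents in float16.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    vae=vae,
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
).to(device)

refiner = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    vae=vae,
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
).to(device)

generator = torch.Generator().manual_seed(0)

# Base pass: 25 steps (the new default), emitting latents for the refiner.
latents = pipe(
    prompt="An astronaut riding a green horse",
    guidance_scale=5.0,
    num_inference_steps=25,
    generator=generator,
    output_type="latent",
).images

# Refiner pass over the base latents: also 25 steps by default now.
image = refiner(
    prompt="An astronaut riding a green horse",
    guidance_scale=5.0,
    num_inference_steps=25,
    image=latents,
    generator=generator,
).images[0]

image.save("output.png")

Note that the guidance_scale_base and guidance_scale_refiner defaults stay at 5.0; only the step counts and the VAE wiring change in this diff.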
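On the UI side, the change is just the sliders' value=25 default. The toy sketch below, with hypothetical component names, shows how such a slider default feeds an inference function inside a gr.Blocks app; the real app wires many more inputs into generate().

# Toy sketch: a num-steps slider defaulting to 25, wired to a stub generate function.
# Component and function names are illustrative, not the Space's actual layout.
import gradio as gr

def generate(prompt: str, num_inference_steps: int = 25) -> str:
    # Stub: the real function runs the SDXL pipelines with this step count.
    return f"Would run {num_inference_steps} steps for: {prompt}"

with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    num_inference_steps = gr.Slider(
        label="Number of inference steps",
        minimum=10,
        maximum=100,
        step=1,
        value=25,  # the new default set by this commit
    )
    result = gr.Textbox(label="Result")
    run_button = gr.Button("Run")
    run_button.click(fn=generate, inputs=[prompt, num_inference_steps], outputs=result)

demo.launch()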