Update app.py
app.py CHANGED
@@ -756,7 +756,7 @@ print(result)
 @spaces.GPU
 @torch.no_grad()
 def generate_image(
-    prompt, width, height, guidance, seed,
+    prompt, width, height, guidance, inference_steps, seed,
     do_img2img, init_image, image2image_strength, resize_img,
     progress=gr.Progress(track_tqdm=True),
 ):
@@ -786,7 +786,7 @@ def generate_image(
     generator = torch.Generator(device=device).manual_seed(seed)
     x = torch.randn(1, 16, 2 * math.ceil(height / 16), 2 * math.ceil(width / 16), device=device, dtype=torch.bfloat16, generator=generator)
 
-    num_steps =
+    num_steps = inference_steps
     timesteps = get_schedule(num_steps, (x.shape[-1] * x.shape[-2]) // 4, shift=True)
 
     if do_img2img and init_image is not None:
@@ -876,6 +876,13 @@ def create_demo():
         width = gr.Slider(minimum=128, maximum=2048, step=64, label="Width", value=1360)
         height = gr.Slider(minimum=128, maximum=2048, step=64, label="Height", value=768)
         guidance = gr.Slider(minimum=1.0, maximum=5.0, step=0.1, label="Guidance", value=3.5)
+        inference_steps = gr.Slider(
+            label="Inference steps",
+            minimum=1,
+            maximum=30,
+            step=1,
+            value=16,
+        )
         seed = gr.Number(label="Seed", precision=-1)
         do_img2img = gr.Checkbox(label="Image to Image", value=False)
         init_image = gr.Image(label="Input Image", visible=False)
@@ -895,7 +902,7 @@ def create_demo():
 
         generate_button.click(
            fn=generate_image,
-           inputs=[prompt, width, height, guidance, seed, do_img2img, init_image, image2image_strength, resize_img],
+           inputs=[prompt, width, height, guidance, inference_steps, seed, do_img2img, init_image, image2image_strength, resize_img],
            outputs=[output_image, output_seed]
         )
 
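For context, a minimal self-contained sketch of the wiring pattern this commit relies on: a gr.Slider whose value is forwarded positionally through click(inputs=...) into the handler. The handler below (fake_generate) is a hypothetical stand-in for the app's generate_image, since the full FLUX sampling code is not reproduced here.

# Minimal sketch, not the actual app: demonstrates how a slider value reaches
# the click handler by position in the inputs list.
import gradio as gr

def fake_generate(prompt, inference_steps, seed):
    # In app.py the slider value becomes num_steps and is handed to get_schedule();
    # this stand-in only echoes what the UI passed in.
    return f"prompt={prompt!r}, steps={int(inference_steps)}, seed={seed}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    inference_steps = gr.Slider(label="Inference steps", minimum=1, maximum=30, step=1, value=16)
    seed = gr.Number(label="Seed", precision=-1)  # same widget settings as the app
    output = gr.Textbox(label="Result")
    generate_button = gr.Button("Generate")
    # Inputs are matched to the handler's parameters by position, which is why the
    # diff inserts inference_steps into the inputs list at the same spot it occupies
    # in the generate_image signature.
    generate_button.click(
        fn=fake_generate,
        inputs=[prompt, inference_steps, seed],
        outputs=[output],
    )

if __name__ == "__main__":
    demo.launch()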