Spaces:
Running
on
Zero
Running
on
Zero
adamelliotfields
committed on
Commit
•
10d9721
1
Parent(s):
1c11426
Progress bar improvements
Browse files- app.py +1 -1
- lib/inference.py +10 -7
app.py
CHANGED
@@ -109,7 +109,7 @@ async def generate_fn(*args):
|
|
109 |
*gen_args,
|
110 |
Info=gr.Info,
|
111 |
Error=gr.Error,
|
112 |
-
|
113 |
)
|
114 |
except RuntimeError:
|
115 |
raise gr.Error("Error: Please try again")
|
|
|
109 |
*gen_args,
|
110 |
Info=gr.Info,
|
111 |
Error=gr.Error,
|
112 |
+
Progress=gr.Progress,
|
113 |
)
|
114 |
except RuntimeError:
|
115 |
raise gr.Error("Error: Please try again")
|
lib/inference.py
CHANGED
@@ -3,8 +3,8 @@ import re
|
|
3 |
import time
|
4 |
from datetime import datetime
|
5 |
from itertools import product
|
6 |
-
from typing import Callable
|
7 |
|
|
|
8 |
import numpy as np
|
9 |
import spaces
|
10 |
import torch
|
@@ -120,9 +120,10 @@ def generate(
|
|
120 |
taesd=False,
|
121 |
freeu=False,
|
122 |
clip_skip=False,
|
123 |
-
Info
|
124 |
Error=Exception,
|
125 |
-
|
|
|
126 |
):
|
127 |
if not torch.cuda.is_available():
|
128 |
raise Error("CUDA not available")
|
@@ -147,21 +148,23 @@ def generate(
|
|
147 |
else:
|
148 |
IP_ADAPTER = ""
|
149 |
|
150 |
-
if
|
151 |
TQDM = False
|
152 |
-
|
|
|
153 |
else:
|
154 |
TQDM = True
|
|
|
155 |
|
156 |
def callback_on_step_end(pipeline, step, timestep, latents):
|
157 |
nonlocal CURRENT_STEP, CURRENT_IMAGE
|
158 |
-
if
|
159 |
return latents
|
160 |
strength = denoising_strength if KIND == "img2img" else 1
|
161 |
total_steps = min(int(inference_steps * strength), inference_steps)
|
162 |
|
163 |
CURRENT_STEP = step + 1
|
164 |
-
|
165 |
(CURRENT_STEP, total_steps),
|
166 |
desc=f"Generating image {CURRENT_IMAGE}/{num_images}",
|
167 |
)
|
|
|
3 |
import time
|
4 |
from datetime import datetime
|
5 |
from itertools import product
|
|
|
6 |
|
7 |
+
import gradio as gr
|
8 |
import numpy as np
|
9 |
import spaces
|
10 |
import torch
|
|
|
120 |
taesd=False,
|
121 |
freeu=False,
|
122 |
clip_skip=False,
|
123 |
+
Info=None,
|
124 |
Error=Exception,
|
125 |
+
Progress=None,
|
126 |
+
progress=gr.Progress(track_tqdm=True),
|
127 |
):
|
128 |
if not torch.cuda.is_available():
|
129 |
raise Error("CUDA not available")
|
|
|
148 |
else:
|
149 |
IP_ADAPTER = ""
|
150 |
|
151 |
+
if Progress is not None:
|
152 |
TQDM = False
|
153 |
+
progress_bar = Progress()
|
154 |
+
progress_bar((0, inference_steps), desc=f"Generating image {CURRENT_IMAGE}/{num_images}")
|
155 |
else:
|
156 |
TQDM = True
|
157 |
+
progress_bar = None
|
158 |
|
159 |
def callback_on_step_end(pipeline, step, timestep, latents):
|
160 |
nonlocal CURRENT_STEP, CURRENT_IMAGE
|
161 |
+
if Progress is None:
|
162 |
return latents
|
163 |
strength = denoising_strength if KIND == "img2img" else 1
|
164 |
total_steps = min(int(inference_steps * strength), inference_steps)
|
165 |
|
166 |
CURRENT_STEP = step + 1
|
167 |
+
progress_bar(
|
168 |
(CURRENT_STEP, total_steps),
|
169 |
desc=f"Generating image {CURRENT_IMAGE}/{num_images}",
|
170 |
)
|