Update app.py
app.py CHANGED
@@ -26,21 +26,20 @@ torch.cuda.empty_cache()
 def generate_image(prompt, seed=42, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, randomize_seed=False, num_inference_steps=2, progress=gr.Progress(track_tqdm=True)):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-    generator = torch.Generator().manual_seed(int(float(seed)))
+    generator = torch.Generator(device="cuda").manual_seed(int(float(seed)))

     start_time = time.time()

     # Only generate the last image in the sequence
+    img = pipe.generate_images(
         prompt=prompt,
-        guidance_scale=0, # as Flux schnell is guidance free
-        num_inference_steps=num_inference_steps,
         width=width,
         height=height,
+        num_inference_steps=num_inference_steps,
         generator=generator
-    )
+    )
+    latency = f"Latency: {(time.time()-start_time):.2f} seconds"
+    return img, seed, latency

 # Example prompts
 examples = [