linoyts committed
Commit 2d30f4b
1 Parent(s): 102409f

Update app.py

Files changed (1)
  1. app.py +7 -2
app.py CHANGED
@@ -3,6 +3,7 @@ import spaces
 import torch
 from clip_slider_pipeline import CLIPSliderXL
 from diffusers import StableDiffusionXLPipeline, EulerDiscreteScheduler, AutoencoderKL
+import time
 
 #vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 flash_pipe = StableDiffusionXLPipeline.from_pretrained("sd-community/sdxl-flash").to("cuda", torch.float16)
@@ -14,7 +15,7 @@ def generate(slider_x, slider_y, prompt, iterations, steps,
              x_concept_1, x_concept_2, y_concept_1, y_concept_2,
              avg_diff_x_1, avg_diff_x_2,
              avg_diff_y_1, avg_diff_y_2):
-
+    start_time = time.time()
     # check if avg diff for directions need to be re-calculated
     if not sorted(slider_x) == sorted([x_concept_1, x_concept_2]):
         avg_diff = clip_slider.find_latent_direction(slider_x[0], slider_x[1], num_iterations=iterations)
@@ -23,8 +24,12 @@ def generate(slider_x, slider_y, prompt, iterations, steps,
     if not sorted(slider_y) == sorted([y_concept_1, y_concept_2]):
         avg_diff_2nd = clip_slider.find_latent_direction(slider_y[0], slider_y[1], num_iterations=iterations)
         y_concept_1, y_concept_2 = slider_y[0], slider_y[1]
-
+    end_time = time.time()
+    print(f"direction time: {end_time - start_time:.2f} ms")
+    start_time = time.time()
     image = clip_slider.generate(prompt, scale=0, scale_2nd=0, num_inference_steps=steps, avg_diff=avg_diff, avg_diff_2nd=avg_diff_2nd)
+    end_time = time.time()
+    print(f"generation time: {end_time - start_time:.2f} ms")
     comma_concepts_x = ', '.join(slider_x)
     comma_concepts_y = ', '.join(slider_y)
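
One note on the instrumentation this commit adds: time.time() returns seconds, so the values printed under the "ms" labels are actually elapsed seconds. Below is a minimal sketch of the same wall-clock timing with an explicit millisecond conversion; the timed helper is hypothetical and not part of app.py, and time.perf_counter() is used because it is a monotonic clock intended for interval measurement.

import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    # Hypothetical helper (not in app.py): time the wrapped block and
    # report the elapsed wall-clock time in milliseconds.
    start = time.perf_counter()
    try:
        yield
    finally:
        elapsed_ms = (time.perf_counter() - start) * 1000
        print(f"{label}: {elapsed_ms:.2f} ms")

# Usage mirroring the two sections timed in this commit:
#   with timed("direction time"):
#       avg_diff = clip_slider.find_latent_direction(...)
#   with timed("generation time"):
#       image = clip_slider.generate(...)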