linoyts committed
Commit 79bd7e1
1 Parent(s): 0f97695

add steps & iterations params (#2)


- add steps & iterations params (198def9bbd7c4789dc1fe7525574bb386668499c)
- Update clip_slider_pipeline.py (1efd0cb7516df60b50fe99af918beec5b3fd29f5)

Files changed (2)
  1. app.py +12 -10
  2. clip_slider_pipeline.py +14 -6
app.py CHANGED
@@ -7,24 +7,24 @@ from diffusers import StableDiffusionXLPipeline, EulerDiscreteScheduler, Autoen
 #vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 flash_pipe = StableDiffusionXLPipeline.from_pretrained("sd-community/sdxl-flash").to("cuda", torch.float16)
 flash_pipe.scheduler = EulerDiscreteScheduler.from_config(flash_pipe.scheduler.config)
-clip_slider = CLIPSliderXL(flash_pipe, device=torch.device("cuda"), iterations=100)
+clip_slider = CLIPSliderXL(flash_pipe, device=torch.device("cuda"))
 
 @spaces.GPU
-def generate(slider_x, slider_y, prompt,
+def generate(slider_x, slider_y, prompt, iterations, steps,
              x_concept_1, x_concept_2, y_concept_1, y_concept_2,
              avg_diff_x_1, avg_diff_x_2,
              avg_diff_y_1, avg_diff_y_2):
 
     # check if avg diff for directions need to be re-calculated
     if not sorted(slider_x) == sorted([x_concept_1, x_concept_2]):
-        avg_diff = clip_slider.find_latent_direction(slider_x[0], slider_x[1])
+        avg_diff = clip_slider.find_latent_direction(slider_x[0], slider_x[1], iterations=iterations)
         x_concept_1, x_concept_2 = slider_x[0], slider_x[1]
 
     if not sorted(slider_y) == sorted([y_concept_1, y_concept_2]):
-        avg_diff_2nd = clip_slider.find_latent_direction(slider_y[0], slider_y[1])
+        avg_diff_2nd = clip_slider.find_latent_direction(slider_y[0], slider_y[1], iterations=iterations)
         y_concept_1, y_concept_2 = slider_y[0], slider_y[1]
 
-    image = clip_slider.generate(prompt, scale=0, scale_2nd=0, num_inference_steps=8, avg_diff=avg_diff, avg_diff_2nd=avg_diff_2nd)
+    image = clip_slider.generate(prompt, scale=0, scale_2nd=0, num_inference_steps=steps, avg_diff=avg_diff, avg_diff_2nd=avg_diff_2nd)
     comma_concepts_x = ', '.join(slider_x)
     comma_concepts_y = ', '.join(slider_y)
 
@@ -36,17 +36,17 @@ def generate(slider_x, slider_y, prompt,
     return gr.update(label=comma_concepts_x, interactive=True),gr.update(label=comma_concepts_y, interactive=True), x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2, image
 
 @spaces.GPU
-def update_x(x,y,prompt, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2):
+def update_x(x,y,prompt, steps, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2):
    avg_diff = [avg_diff_x_1.cuda(), avg_diff_x_2.cuda()]
    avg_diff_2nd = [avg_diff_y_1.cuda(), avg_diff_y_2.cuda()]
-   image = clip_slider.generate(prompt, scale=x, scale_2nd=y, num_inference_steps=8, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
+   image = clip_slider.generate(prompt, scale=x, scale_2nd=y, num_inference_steps=steps, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
    return image
 
 @spaces.GPU
-def update_y(x,y,prompt, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2):
+def update_y(x,y,prompt, steps, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2):
    avg_diff = [avg_diff_x_1.cuda(), avg_diff_x_2.cuda()]
    avg_diff_2nd = [avg_diff_y_1.cuda(), avg_diff_y_2.cuda()]
-   image = clip_slider.generate(prompt, scale=x, scale_2nd=y, num_inference_steps=8, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
+   image = clip_slider.generate(prompt, scale=x, scale_2nd=y, num_inference_steps=steps, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
    return image
 
 css = '''
@@ -96,10 +96,12 @@ with gr.Blocks(css=css) as demo:
         y = gr.Slider(minimum=-10, value=0, maximum=10, elem_id="y", interactive=False)
         output_image = gr.Image(elem_id="image_out")
         with gr.Accordion(label="advanced options"):
+            iterations = gr.Slider(label = "num iterations", minimum=0, value=100, maximum=300)
+            steps = gr.Slider(label = "num inference steps", minimum=1, value=8, maximum=30)
 
 
     submit.click(fn=generate,
-                 inputs=[slider_x, slider_y, prompt, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2],
+                 inputs=[slider_x, slider_y, prompt, iterations, steps, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2],
                  outputs=[x, y, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2, output_image])
     x.change(fn=update_x, inputs=[x,y, prompt, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2], outputs=[output_image])
     y.change(fn=update_y, inputs=[x,y, prompt, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2], outputs=[output_image])
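As committed, `update_x` and `update_y` now expect a `steps` argument, but the `x.change`/`y.change` wiring above still passes the old input list, and `generate` forwards the iteration count as `iterations=` while `find_latent_direction` (below) names the parameter `num_iterations`. A minimal sketch of event wiring that would match the new signatures, assuming the component names above (not part of this commit):

    # sketch: pass the new `steps` component through to the update handlers
    x.change(fn=update_x, inputs=[x, y, prompt, steps, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2], outputs=[output_image])
    y.change(fn=update_y, inputs=[x, y, prompt, steps, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2], outputs=[output_image])
    # sketch: use the keyword that find_latent_direction actually defines
    avg_diff = clip_slider.find_latent_direction(slider_x[0], slider_x[1], num_iterations=iterations)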
clip_slider_pipeline.py CHANGED
@@ -32,17 +32,21 @@ class CLIPSlider:
 
     def find_latent_direction(self,
                               target_word:str,
-                              opposite:str):
+                              opposite:str,
+                              num_iterations: int = None):
 
         # lets identify a latent direction by taking differences between opposites
         # target_word = "happy"
         # opposite = "sad"
 
-
+        if num_iterations is not None:
+            iterations = num_iterations
+        else:
+            iterations = self.iterations
         with torch.no_grad():
             positives = []
             negatives = []
-            for i in tqdm(range(self.iterations)):
+            for i in tqdm(range(iterations)):
                 medium = random.choice(MEDIUMS)
                 subject = random.choice(SUBJECTS)
                 pos_prompt = f"a {medium} of a {target_word} {subject}"
@@ -145,19 +149,23 @@ class CLIPSliderXL(CLIPSlider):
 
     def find_latent_direction(self,
                               target_word:str,
-                              opposite:str):
+                              opposite:str,
+                              num_iterations: int = None):
 
         # lets identify a latent direction by taking differences between opposites
        # target_word = "happy"
         # opposite = "sad"
-
+        if num_iterations is not None:
+            iterations = num_iterations
+        else:
+            iterations = self.iterations
 
         with torch.no_grad():
             positives = []
             negatives = []
             positives2 = []
             negatives2 = []
-            for i in tqdm(range(self.iterations)):
+            for i in tqdm(range(iterations)):
                 medium = random.choice(MEDIUMS)
                 subject = random.choice(SUBJECTS)
                 pos_prompt = f"a {medium} of a {target_word} {subject}"