linoyts HF staff committed on
Commit
50d5527
1 Parent(s): 3409336

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -1937,7 +1937,7 @@ pipe_inv = LEditsPPPipelineStableDiffusionXL.from_pretrained(
1937
  "stabilityai/stable-diffusion-xl-base-1.0", vae=vae,
1938
  torch_dtype=torch.float16
1939
  )
1940
- clip_slider_inv = CLIPSliderXL(sd_pipe=pipe_inv,device=torch.device("cuda"))
1941
 
1942
  @spaces.GPU(duration=120)
1943
  def generate(slider_x, slider_y, prompt, seed, iterations, steps, guidance_scale,
@@ -1978,7 +1978,7 @@ def generate(slider_x, slider_y, prompt, seed, iterations, steps, guidance_scale
1978
  elif img2img_type=="ip adapter" and img is not None:
1979
  image = clip_slider.generate(prompt, guidance_scale=guidance_scale, ip_adapter_image=img, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=(avg_diff_0,avg_diff_1), avg_diff_2nd=(avg_diff_2nd_0,avg_diff_2nd_1))
1980
  elif img2img_type=="inversion":
1981
- image = clip_slider.generate(prompt, guidance_scale=guidance_scale, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=(avg_diff_0,avg_diff_1), avg_diff_2nd=(avg_diff_2nd_0,avg_diff_2nd_1), init_latents = init_latents, zs=zs, edit_threshold=edit_threshold, edit_guidance_scale = edit_guidance_scale)
1982
  else: # text to image
1983
  image = clip_slider.generate(prompt, guidance_scale=guidance_scale, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=(avg_diff_0,avg_diff_1), avg_diff_2nd=(avg_diff_2nd_0,avg_diff_2nd_1))
1984
 
@@ -2010,7 +2010,7 @@ def update_scales(x,y,prompt,seed, steps, guidance_scale,
2010
  elif img2img_type=="ip adapter" and img is not None:
2011
  image = clip_slider.generate(prompt, guidance_scale=guidance_scale, ip_adapter_image=img, scale=x, scale_2nd=y, seed=seed, num_inference_steps=steps, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
2012
  elif img2img_type=="inversion":
2013
- image = clip_slider.generate(prompt, guidance_scale=guidance_scale, scale=x, scale_2nd=y, seed=seed, num_inference_steps=steps, avg_diff=(avg_diff_0,avg_diff_1), avg_diff_2nd=(avg_diff_2nd_0,avg_diff_2nd_1), edit_threshold=edit_threshold, edit_guidance_scale = edit_guidance_scale, init_latents = init_latents, zs=zs)
2014
  else:
2015
  image = clip_slider.generate(prompt, guidance_scale=guidance_scale, scale=x, scale_2nd=y, seed=seed, num_inference_steps=steps, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
2016
  return image
 
1937
  "stabilityai/stable-diffusion-xl-base-1.0", vae=vae,
1938
  torch_dtype=torch.float16
1939
  )
1940
+ clip_slider_inv = CLIPSliderXL_inv(sd_pipe=pipe_inv,device=torch.device("cuda"))
1941
 
1942
  @spaces.GPU(duration=120)
1943
  def generate(slider_x, slider_y, prompt, seed, iterations, steps, guidance_scale,
 
1978
  elif img2img_type=="ip adapter" and img is not None:
1979
  image = clip_slider.generate(prompt, guidance_scale=guidance_scale, ip_adapter_image=img, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=(avg_diff_0,avg_diff_1), avg_diff_2nd=(avg_diff_2nd_0,avg_diff_2nd_1))
1980
  elif img2img_type=="inversion":
1981
+ image = clip_slider.generate(prompt, guidance_scale=guidance_scale, scale=0, scale_2nd=0, seed=seed, avg_diff=(avg_diff_0,avg_diff_1), avg_diff_2nd=(avg_diff_2nd_0,avg_diff_2nd_1), init_latents = init_latents, zs=zs, edit_threshold=[edit_threshold], edit_guidance_scale = [edit_guidance_scale])
1982
  else: # text to image
1983
  image = clip_slider.generate(prompt, guidance_scale=guidance_scale, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=(avg_diff_0,avg_diff_1), avg_diff_2nd=(avg_diff_2nd_0,avg_diff_2nd_1))
1984
 
 
2010
  elif img2img_type=="ip adapter" and img is not None:
2011
  image = clip_slider.generate(prompt, guidance_scale=guidance_scale, ip_adapter_image=img, scale=x, scale_2nd=y, seed=seed, num_inference_steps=steps, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
2012
  elif img2img_type=="inversion":
2013
+ image = clip_slider.generate(prompt, guidance_scale=guidance_scale, scale=x, scale_2nd=y, seed=seed, avg_diff=(avg_diff_0,avg_diff_1), avg_diff_2nd=(avg_diff_2nd_0,avg_diff_2nd_1), edit_threshold=[edit_threshold], edit_guidance_scale = [edit_guidance_scale], init_latents = init_latents, zs=zs)
2014
  else:
2015
  image = clip_slider.generate(prompt, guidance_scale=guidance_scale, scale=x, scale_2nd=y, seed=seed, num_inference_steps=steps, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
2016
  return image