arthur-qiu committed on
Commit
582cd46
1 Parent(s): 72763fe
Files changed (1)
  1. app.py +8 -46
app.py CHANGED
@@ -9,35 +9,7 @@ from pipeline_freescale import StableDiffusionXLPipeline
 from free_lunch_utils import register_free_upblock2d, register_free_crossattn_upblock2d
 
 @spaces.GPU(duration=120)
-def infer_gpu_fast(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu):
-    pipe = pipe.to("cuda")
-    generator = torch.Generator(device='cuda')
-    generator = generator.manual_seed(seed)
-    if not disable_freeu:
-        register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
-        register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
-    result = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
-                  num_inference_steps=ddim_steps, guidance_scale=guidance_scale,
-                  resolutions_list=resolutions_list, fast_mode=fast_mode, cosine_scale=cosine_scale,
-                  )
-    return result
-
-@spaces.GPU(duration=240)
-def infer_gpu_mid(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu):
-    pipe = pipe.to("cuda")
-    generator = torch.Generator(device='cuda')
-    generator = generator.manual_seed(seed)
-    if not disable_freeu:
-        register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
-        register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
-    result = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
-                  num_inference_steps=ddim_steps, guidance_scale=guidance_scale,
-                  resolutions_list=resolutions_list, fast_mode=fast_mode, cosine_scale=cosine_scale,
-                  )
-    return result
-
-@spaces.GPU(duration=360)
-def infer_gpu_slow(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu):
+def infer_gpu_part(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu):
     pipe = pipe.to("cuda")
     generator = torch.Generator(device='cuda')
     generator = generator.manual_seed(seed)
@@ -53,26 +25,16 @@ def infer_gpu_slow(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu):
 def infer(prompt, output_size, ddim_steps, guidance_scale, cosine_scale, seed, options, negative_prompt):
 
     disable_freeu = 'Disable FreeU' in options
-    fast_mode = 'Fast Mode' in options
+    fast_mode = True
     if output_size == "2048 x 2048":
         resolutions_list = [[1024, 1024],
                             [2048, 2048]]
-        infer_gpu_part = infer_gpu_fast
-    elif output_size == "2048 x 4096":
+    elif output_size == "1024 x 2048":
         resolutions_list = [[512, 1024],
-                            [1024, 2048],
-                            [2048, 4096]]
-        infer_gpu_part = infer_gpu_mid
-    elif output_size == "4096 x 2048":
+                            [1024, 2048]]
+    elif output_size == "2048 x 1024":
         resolutions_list = [[1024, 512],
-                            [2048, 1024],
-                            [4096, 2048]]
-        infer_gpu_part = infer_gpu_mid
-    elif output_size == "4096 x 4096":
-        resolutions_list = [[1024, 1024],
-                            [2048, 2048],
-                            [4096, 4096]]
-        infer_gpu_part = infer_gpu_slow
+                            [2048, 1024]]
 
     model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
     pipe = StableDiffusionXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16)
@@ -198,7 +160,7 @@ with gr.Blocks(css=css) as demo:
     with gr.Row():
        with gr.Accordion('FreeScale Parameters (feel free to adjust these parameters based on your prompt): ', open=False):
            with gr.Row():
-               output_size = gr.Dropdown(["2048 x 2048", "2048 x 4096", "4096 x 2048", "4096 x 4096"], value="2048 x 2048", label="Output Size (H x W)")
+               output_size = gr.Dropdown(["2048 x 2048", "1024 x 2048", "2048 x 1024"], value="2048 x 2048", label="Output Size (H x W)", info="Due to GPU constraints, run the demo locally for higher resolutions.")
            with gr.Row():
                ddim_steps = gr.Slider(label='DDIM Steps',
                                       minimum=5,
@@ -222,7 +184,7 @@ with gr.Blocks(css=css) as demo:
                                step=1,
                                value=123)
            with gr.Row():
-               options = gr.CheckboxGroup(['Disable FreeU', 'Fast Mode'], label='Options (NOT recommended to change)')
+               options = gr.CheckboxGroup(['Disable FreeU'], label='Options (NOT recommended to change)')
            with gr.Row():
                negative_prompt = gr.Textbox(label='Negative Prompt', value='blurry, ugly, duplicate, poorly drawn, deformed, mosaic')
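
The new dropdown info text points users who want larger outputs to a local run. As a minimal sketch of what that implies, the mapping below gathers the coarse-to-fine resolution schedules from both sides of this diff: the three sizes the hosted demo keeps and the three this commit removes. The dictionary and helper names are illustrative and not part of app.py; only the size strings and resolution lists are taken from the code above.

# Hypothetical local-run helper; not part of this commit.
# Schedules mirror the resolution lists kept and removed in the diff above.
RESOLUTION_SCHEDULES = {
    # Kept in the hosted demo
    "2048 x 2048": [[1024, 1024], [2048, 2048]],
    "1024 x 2048": [[512, 1024], [1024, 2048]],
    "2048 x 1024": [[1024, 512], [2048, 1024]],
    # Removed from the hosted demo for GPU-time reasons; feasible locally with enough VRAM
    "2048 x 4096": [[512, 1024], [1024, 2048], [2048, 4096]],
    "4096 x 2048": [[1024, 512], [2048, 1024], [4096, 2048]],
    "4096 x 4096": [[1024, 1024], [2048, 2048], [4096, 4096]],
}

def resolutions_for(output_size):
    # Look up the progressive resolution list for a requested "H x W" size string.
    return RESOLUTION_SCHEDULES[output_size]

With a lookup like this, the if/elif chain in infer collapses to a single dictionary access, which is one way to re-expose the larger sizes locally without reintroducing the three duration-specific GPU functions.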