pranavajay committed
Commit dd114c6
Parent: f96320e

Update app.py

Files changed (1)
app.py +19 -23
app.py CHANGED
@@ -1,46 +1,48 @@
  import gradio as gr
  import numpy as np
  import random
- from diffusers import DiffusionPipeline
+ from diffusers import FluxPipeline
  import torch

  device = "cuda" if torch.cuda.is_available() else "cpu"

  if torch.cuda.is_available():
      torch.cuda.max_memory_allocated(device=device)
-     pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
+     pipe = FluxPipeline.from_pretrained("enhanceaiteam/kalpana", torch_dtype=torch.bfloat16)
      pipe.enable_xformers_memory_efficient_attention()
      pipe = pipe.to(device)
+     pipe.enable_model_cpu_offload()
  else:
-     pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
+     pipe = FluxPipeline.from_pretrained("enhanceaiteam/kalpana", torch_dtype=torch.bfloat16)
      pipe = pipe.to(device)
+     pipe.enable_model_cpu_offload()

  MAX_SEED = np.iinfo(np.int32).max
  MAX_IMAGE_SIZE = 1024

- def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-
+ def infer(prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
+
      if randomize_seed:
          seed = random.randint(0, MAX_SEED)

      generator = torch.Generator().manual_seed(seed)

      image = pipe(
-         prompt = prompt,
-         negative_prompt = negative_prompt,
-         guidance_scale = guidance_scale,
-         num_inference_steps = num_inference_steps,
-         width = width,
-         height = height,
-         generator = generator
-     ).images[0]
+         prompt=prompt,
+         guidance_scale=guidance_scale,
+         height=height,
+         width=width,
+         num_inference_steps=num_inference_steps,
+         max_sequence_length=256,
+         generator=generator,
+     ).images[0]

      return image

  examples = [
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+     "A cat holding a sign that says 'hello world'",
      "An astronaut riding a green horse",
-     "A delicious ceviche cheesecake slice",
+     "A futuristic cityscape at sunset",
  ]

  css="""
@@ -79,12 +81,6 @@ with gr.Blocks(css=css) as demo:

          with gr.Accordion("Advanced Settings", open=False):

-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=1,
-                 placeholder="Enter a negative prompt",
-                 visible=False,
-             )

              seed = gr.Slider(
                  label="Seed",
@@ -129,7 +125,7 @@ with gr.Blocks(css=css) as demo:
                      minimum=1,
                      maximum=12,
                      step=1,
-                     value=2,
+                     value=4,
                  )

          gr.Examples(
@@ -139,7 +135,7 @@ with gr.Blocks(css=css) as demo:

      run_button.click(
          fn = infer,
-         inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+         inputs = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
          outputs = [result]
      )
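
For reference, the updated inference path can be exercised outside the Gradio app. Below is a minimal sketch that mirrors the values this commit sets (bfloat16 weights, model CPU offload, the new 4-step default, max_sequence_length=256). The seed, resolution, and guidance_scale are illustrative placeholders, since the app reads those from UI sliders whose settings are only partly visible in this diff, and the sketch assumes the enhanceaiteam/kalpana checkpoint loads through the standard FluxPipeline interface.

import torch
from diffusers import FluxPipeline

# Checkpoint and dtype introduced by this commit.
pipe = FluxPipeline.from_pretrained("enhanceaiteam/kalpana", torch_dtype=torch.bfloat16)
# The commit enables CPU offload on both branches; submodules are moved
# to the GPU only while they run, capping peak VRAM.
pipe.enable_model_cpu_offload()

generator = torch.Generator().manual_seed(42)  # arbitrary example seed

image = pipe(
    prompt="A cat holding a sign that says 'hello world'",  # first example prompt in the diff
    guidance_scale=3.5,         # illustrative; the app supplies this from a slider
    width=1024,                 # MAX_IMAGE_SIZE in the app; illustrative here
    height=1024,
    num_inference_steps=4,      # new slider default set by this commit
    max_sequence_length=256,    # matches the committed pipe(...) call
    generator=generator,
).images[0]
image.save("kalpana.png")

Note that enable_model_cpu_offload() manages device placement itself, so the sketch skips the explicit pipe.to(device) call that the committed app also performs.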