idlebg committed
Commit e6b7acb · 1 Parent(s): a3830bf

Update app.py

Files changed (1)
  1. app.py +38 -36
app.py CHANGED
@@ -95,7 +95,7 @@ def display_and_download_images(output_images, metadata):
 
 PIPELINE_NAMES = Literal["txt2img", "inpaint", "img2img"]
 
-DEFAULT_PROMPT = "sprinkled donut sitting on top of a green cherry apple, colorful hyperrealism"
+DEFAULT_PROMPT = "sprinkled purple apple donut sitting on top of a ice table, colorful hyperrealism"
 DEFAULT_WIDTH, DEFAULT_HEIGHT = 512, 512
 OUTPUT_IMAGE_KEY = "output_img"
 LOADED_IMAGE_KEY = "loaded_image"
@@ -217,40 +217,36 @@ def generate(
     with open(f"{filename}.txt", "w") as f:
         f.write(prompt)
 
+
     # After generating the images, clear the GPU cache
     torch.cuda.empty_cache()
 
+
     return output_images # return the list of image objects
 
 
 
 
 
+
 def prompt_and_generate_button(prefix, pipeline_name: PIPELINE_NAMES, **kwargs):
-    # Change 1: Prompt and Negative prompt to be on 1 line split like width and high (2 columns)
+    prompt = st.text_area(
+        "Prompt",
+        value=DEFAULT_PROMPT,
+        key=f"{prefix}-prompt",
+    )
+    negative_prompt = st.text_area(
+        "Negative prompt",
+        value="(disfigured), bad quality, ((bad art)), ((deformed)), ((extra limbs)), (((duplicate))), ((morbid)), (((ugly)), blurry, ((bad anatomy)), (((bad proportions))), cloned face, body out of frame, out of frame, bad anatomy, gross proportions, (malformed limbs), ((missing arms)), ((missing legs)), (((extra arms))), (((extra legs))), (fused fingers), (too many fingers), (((long neck))), Deformed, blurry",
+        key=f"{prefix}-negative-prompt",
+    )
     col1, col2 = st.columns(2)
     with col1:
-        prompt = st.text_area(
-            "Prompt",
-            value=DEFAULT_PROMPT,
-            key=f"{prefix}-prompt",
-        )
+        steps = st.slider("Number of inference steps", min_value=11, max_value=69, value=14, key=f"{prefix}-inference-steps")
     with col2:
-        negative_prompt = st.text_area(
-            "Negative prompt",
-            value="(disfigured), bad quality, ((bad art)), ((deformed)), ((extra limbs)), (((duplicate))), ((morbid)), (((ugly)), blurry, ((bad anatomy)), (((bad proportions))), (malformed limbs), ((missing arms)), ((missing legs)), (((extra arms))), (((extra legs))), (fused fingers), (too many fingers), (((long neck))), Deformed, blurry",
-            key=f"{prefix}-negative-prompt",
+        guidance_scale = st.slider(
+            "Guidance scale", min_value=0.0, max_value=20.0, value=7.5, step=0.5, key=f"{prefix}-guidance-scale"
         )
-
-    # Change 2: Number of inference steps, Guidance scale, and Number of images to generate to be in a line, 3 columns
-    col1, col2, col3 = st.columns(3)
-    with col1:
-        steps = st.slider("Number of inference steps", min_value=11, max_value=20, value=14, key=f"{prefix}-inference-steps")
-    with col2:
-        guidance_scale = st.slider("Guidance scale", min_value=0.0, max_value=15.0, value=7.5, step=0.5, key=f"{prefix}-guidance-scale")
-    with col3:
-        num_images = st.slider("Number of images to generate", min_value=1, max_value=2, value=1, key=f"{prefix}-num-images")
-
     # Add a select box for the schedulers
     scheduler_name = st.selectbox(
         "Choose a Scheduler",
@@ -260,35 +256,41 @@ def prompt_and_generate_button(prefix, pipeline_name: PIPELINE_NAMES, **kwargs):
     )
     scheduler_class = AVAILABLE_SCHEDULERS[scheduler_name] # Get the selected scheduler class
 
-    pipe = get_pipeline(pipeline_name, scheduler_name=scheduler_name)
 
-    # enable_attention_slicing = st.checkbox('Enable attention slicing (enables higher resolutions but is slower)', key=f"{prefix}-attention-slicing", value=True)
-    # enable_xformers = st.checkbox('Enable xformers library (better memory usage)', key=f"{prefix}-xformers", value=True)
+    pipe = get_pipeline(pipeline_name, scheduler_name=scheduler_name)
+
+    # enable_attention_slicing = st.checkbox('Enable attention slicing (enables higher resolutions but is slower)', key=f"{prefix}-attention-slicing", value=True)
+    # enable_xformers = st.checkbox('Enable xformers library (better memory usage)', key=f"{prefix}-xformers", value=True)
+    num_images = st.slider("Number of images to generate", min_value=1, max_value=2, value=1, key=f"{prefix}-num-images")
 
     images = []
 
+
    if st.button("Generate images", key=f"{prefix}-btn"):
        with st.spinner("Generating image..."):
            images = generate(
                prompt,
                pipeline_name,
-                num_images=num_images, # add this
+                num_images=num_images, # add this
                negative_prompt=negative_prompt,
                steps=steps,
                guidance_scale=guidance_scale,
-                enable_attention_slicing=True, # value always set to True
-                enable_xformers=True, # value always set to True
+                enable_attention_slicing=True, # value always set to True
+                enable_xformers=True, # value always set to True
                **kwargs,
            )
-            for i, image in enumerate(images): # loop over each image
-                set_image(f"{OUTPUT_IMAGE_KEY}_{i}", image.copy()) # save each image with a unique key
-    image_indices = [int(key.split('_')[-1]) for key in st.session_state.keys() if OUTPUT_IMAGE_KEY in key]
-    cols = st.columns(len(image_indices) if image_indices else 1) # create a column for each image or a single one if no images
-    for i in range(max(image_indices) + 1 if image_indices else 1): # loop over each image index
-        output_image_key = f"{OUTPUT_IMAGE_KEY}_{i}"
-        output_image = get_image(output_image_key)
-        if output_image:
-            cols[i].image(output_image)
+
+            for i, image in enumerate(images): # loop over each image
+                set_image(f"{OUTPUT_IMAGE_KEY}_{i}", image.copy()) # save each image with a unique key
+
+
+    image_indices = [int(key.split('_')[-1]) for key in st.session_state.keys() if OUTPUT_IMAGE_KEY in key]
+    cols = st.columns(len(image_indices) if image_indices else 1) # create a column for each image or a single one if no images
+    for i in range(max(image_indices) + 1 if image_indices else 1): # loop over each image index
+        output_image_key = f"{OUTPUT_IMAGE_KEY}_{i}"
+        output_image = get_image(output_image_key)
+        if output_image:
+            cols[i].image(output_image)
 
 
 
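
The hunks above index into AVAILABLE_SCHEDULERS and call get_pipeline(pipeline_name, scheduler_name=...), both defined elsewhere in app.py and not shown in this commit. A minimal sketch of how that wiring could look, assuming the app uses Hugging Face diffusers with a Stable Diffusion checkpoint; the model id, scheduler labels, and caching decorator below are illustrative assumptions, not the app's actual code:

# Hypothetical sketch of the scheduler mapping and pipeline factory referenced by the diff.
import streamlit as st
import torch
from diffusers import (
    StableDiffusionPipeline,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerDiscreteScheduler,
)

# Assumed mapping from selectbox labels to diffusers scheduler classes.
AVAILABLE_SCHEDULERS = {
    "DDIM": DDIMScheduler,
    "Euler": EulerDiscreteScheduler,
    "DPM++ 2M": DPMSolverMultistepScheduler,
}

MODEL_ID = "runwayml/stable-diffusion-v1-5"  # placeholder checkpoint, not necessarily the one app.py loads

@st.cache_resource
def get_pipeline(pipeline_name: str, scheduler_name: str):
    # The real function presumably branches on pipeline_name ("txt2img" / "img2img" / "inpaint");
    # this sketch only covers the txt2img case.
    pipe = StableDiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.float16)
    # Swap in the scheduler chosen in the selectbox, reusing the existing scheduler's config.
    pipe.scheduler = AVAILABLE_SCHEDULERS[scheduler_name].from_config(pipe.scheduler.config)
    return pipe.to("cuda")

Caching the pipeline this way would keep the scheduler selectbox from forcing a full checkpoint reload on every Streamlit rerun, which fits with the torch.cuda.empty_cache() housekeeping generate() does after each batch.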
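
The new output-display loop also depends on the set_image and get_image helpers and the OUTPUT_IMAGE_KEY constant from earlier in app.py; their definitions are not part of this diff. Assuming they are thin wrappers around st.session_state, they might look roughly like this:

# Hypothetical sketch of the session-state helpers used by the display loop.
from typing import Optional

import streamlit as st
from PIL import Image

OUTPUT_IMAGE_KEY = "output_img"  # matches the constant near line 100 of app.py

def set_image(key: str, img: Image.Image) -> None:
    # Persist a generated image across Streamlit reruns, e.g. under "output_img_0".
    st.session_state[key] = img

def get_image(key: str) -> Optional[Image.Image]:
    # Return the stored image for this slot, or None if nothing has been generated yet.
    return st.session_state.get(key)

Because each image is stashed under its own output_img_{i} key, the loop at the end of prompt_and_generate_button can rebuild one st.columns slot per stored image on every rerun, so earlier results stay visible after the Generate button's state resets.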