JVice committed
Commit
fcedab0
•
1 Parent(s): b81002f

Updated the download functionality


For web-app purposes, a download button is necessary as saving locally doesn't make sense. Just a simple

Files changed (1)
  1. tab_manager.py +51 -55
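
The change swaps the old save-to-server flow (st.button plus a local file write) for Streamlit's st.download_button, which serves bytes straight to the browser. A minimal sketch of that pattern with purely illustrative data and file names (the app's own helper builds its CSV from user_evaluation_variables.EVAL_METRICS):

import pandas as pd
import streamlit as st

# Illustrative results table; the real app assembles this from its evaluation metrics.
results = pd.DataFrame({"Object": ["person", "car"], "Occurrences": [12, 3]})

# Build the CSV in memory and hand the bytes to the browser; nothing is written server-side.
csvBytes = results.to_csv(index=False).encode("utf-8")
st.download_button(label="Download Object Distribution data",
                   data=csvBytes,
                   file_name="example_word_distribution.csv",  # placeholder name
                   mime="text/csv")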
tab_manager.py CHANGED
@@ -90,20 +90,21 @@ def general_bias_eval_setup(tab, modelID, imagesTab):
 
     if user_evaluation_variables.RUN_TIME and user_evaluation_variables.CURRENT_EVAL_TYPE == 'general':
         GBM.output_eval_results(user_evaluation_variables.EVAL_METRICS, 21, 'general')
+        genCSVData = create_word_distribution_csv(user_evaluation_variables.EVAL_METRICS,
+                                                  user_evaluation_variables.EVAL_ID,
+                                                  'general')
         st.write("\U0001F553 Time Taken: ", user_evaluation_variables.RUN_TIME)
-
         saveEvalsButton = st.button("Save + Upload Evaluations", key='SAVE_EVAL_GEN')
-        saveDistButton = st.button("Download Object Distribution", key='SAVE_TOP_GEN')
+        st.download_button(label="Download Object Distribution data", data=genCSVData, key='SAVE_TOP_GEN',
+                           file_name=user_evaluation_variables.EVAL_ID + '_general' + '_word_distribution.csv',
+                           mime='text/csv')
+
         if saveEvalsButton:
             st.write("Saving and uploading evaluations")
             user_evaluation_variables.update_evaluation_table('general',False)
             user_evaluation_variables.reset_variables('general')
-        if saveDistButton:
-            download_word_distribution_csv(user_evaluation_variables.EVAL_METRICS,
-                                           user_evaluation_variables.EVAL_ID, 'general')
-
 
-def task_oriented_bias_eval_setup(tab,modelID,imagesTab):
+def task_oriented_bias_eval_setup(tab, modelID, imagesTab):
     biasSetupDF_EVAL = pd.DataFrame(
         {
             "TO Eval. Variable": ["No. Images to Generate per prompt", "No. Inference Steps", "Image Size (N x N)"],
@@ -144,33 +145,31 @@ def task_oriented_bias_eval_setup(tab,modelID,imagesTab):
     # update_images_tab(imagesTab)
     if user_evaluation_variables.RUN_TIME and user_evaluation_variables.CURRENT_EVAL_TYPE == 'task-oriented':
         GBM.output_eval_results(user_evaluation_variables.EVAL_METRICS, 21, 'task-oriented')
+        taskCSVData = create_word_distribution_csv(user_evaluation_variables.EVAL_METRICS,
+                                                   user_evaluation_variables.EVAL_ID,
+                                                   user_evaluation_variables.TASK_TARGET)
         st.write("\U0001F553 Time Taken: ", user_evaluation_variables.RUN_TIME)
         saveEvalsButton = st.button("Save + Upload Evaluations", key='SAVE_EVAL_TASK')
-        saveDistButton = st.button("Download Object Distribution", key='SAVE_TOP_TASK')
+        st.download_button(label="Download Object Distribution data", data=taskCSVData, key='SAVE_TOP_TASK',
+                           file_name=user_evaluation_variables.EVAL_ID+'_'+user_evaluation_variables.TASK_TARGET+'_word_distribution.csv',
+                           mime='text/csv')
+
         if saveEvalsButton:
             st.write("Saving and uploading evaluations")
-            user_evaluation_variables.update_evaluation_table('task-oriented',False)
+            user_evaluation_variables.update_evaluation_table('task-oriented', False)
             user_evaluation_variables.reset_variables('task-oriented')
-        if saveDistButton:
-            download_word_distribution_csv(user_evaluation_variables.EVAL_METRICS,
-                                           user_evaluation_variables.EVAL_ID, user_evaluation_variables.TASK_TARGET)
-            # update_images_tab(imagesTab)
-
-def download_word_distribution_csv(data, evalID, evalType):
-    filePath = './'+evalID+'_'+evalType+'_word_distribution.csv'
 
+def create_word_distribution_csv(data, evalID, evalType):
     listOfObjects = list(data[0].items())
-    with open(filePath, 'w', newline='') as fp:
-        csvwriter = csv.writer(fp)
-        csvwriter.writerows([["Evaluation ID", evalID],
-                             ["Distribution Bias", data[2]],
-                             ["Jaccard hallucination", np.mean(data[3])],
-                             ["Generative Miss Rate", np.mean(data[4])]])
-        csvwriter.writerow(['Position', 'Object', 'No. Occurences', 'Normalized'])
-        for obj, val, norm, ii in zip(listOfObjects, data[0].values(), data[1], range(len(listOfObjects))):
-            csvwriter.writerow([ii, obj[0], val, norm])
-    st.success('Successfully downloaded word distribution data!', icon="✅")
-
+    csvContents = [["Evaluation Type/Target", evalType],
+                   ["Evaluation ID", evalID],
+                   ["Distribution Bias", data[2]],
+                   ["Jaccard hallucination", np.mean(data[3])],
+                   ["Generative Miss Rate", np.mean(data[4])],
+                   ['Position', 'Object', 'No. Occurences', 'Normalized']]
+    for obj, val, norm, ii in zip(listOfObjects, data[0].values(), data[1], range(len(listOfObjects))):
+        csvContents.append([ii, obj[0], val, norm])
+    return pd.DataFrame(csvContents).to_csv(header=False,index=False).encode('utf-8')
 def initiate_general_bias_evaluation(tab, modelID, specs, imagesTab):
     startTime = time.time()
     objectData = None
@@ -202,9 +201,9 @@ def initiate_general_bias_evaluation(tab, modelID, specs, imagesTab):
         st.write(" ***Occupations*** = ", specs[1]["Check"][2])
         st.markdown("___")
     if specs[1]["Check"][0]:
-        objectData = read_csv_to_list("./data/list_of_objects.csv")
+        objectData = read_csv_to_list("./list_of_objects.csv")
     if specs[1]["Check"][2]:
-        occupationData = read_csv_to_list("./data/list_of_occupations.csv")
+        occupationData = read_csv_to_list("./list_of_occupations.csv")
     if objectData == None and occupationData == None:
         st.error('Make sure that at least one of the "Objects" or "Occupations" rows are checked', icon="🚨")
     else:
@@ -250,7 +249,6 @@ def initiate_general_bias_evaluation(tab, modelID, specs, imagesTab):
         user_evaluation_variables.EVAL_METRICS = GBM.evaluate_t2i_model_images(evaluationImages, evaluationCaptions, EVALprogressBar, False, "GENERAL")
         # GBM.output_eval_results(user_evaluation_variables.EVAL_METRICS, 21)
         elapsedTime = time.time() - startTime
-        # st.write("\U0001F553 Time Taken: ", str(datetime.timedelta(seconds=elapsedTime)).split(".")[0])
 
         user_evaluation_variables.NO_SAMPLES = len(evaluationImages)
         user_evaluation_variables.RESOLUTION = specs[0]["GEN Values"][2] + "x" + specs[0]["GEN Values"][2]
@@ -294,7 +292,7 @@ def initiate_task_oriented_bias_evaluation(tab, modelID, specs, target, imagesTab):
                  icon="🚨")
     else:
         COCOLoadingBar = st.progress(0, text="Scanning through COCO Dataset for relevant prompts. Please wait")
-        prompts, cocoIDs = get_COCO_captions('./data/COCO_captions.json', target.lower(), COCOLoadingBar, captionsToExtract)
+        prompts, cocoIDs = get_COCO_captions('./COCO_captions.json', target.lower(), COCOLoadingBar, captionsToExtract)
         if len(prompts) == 0:
             st.error('Woops! Could not find **ANY** relevant COCO prompts for the target: '+target.lower()+
                      '\nPlease input a different target', icon="🚨")
@@ -318,10 +316,7 @@ def initiate_task_oriented_bias_evaluation(tab, modelID, specs, target, imagesTab):
         EVALprogressBar = st.progress(0, text="Evaluating " + modelID + " Model Images. Please wait.")
         user_evaluation_variables.EVAL_METRICS = GBM.evaluate_t2i_model_images(TASKImages, TASKCaptions[0], EVALprogressBar, False, "TASK")
 
-
-        # GBM.output_eval_results(user_evaluation_variables.EVAL_METRICS, 21)
         elapsedTime = time.time() - startTime
-        # st.write("\U0001F553 Time Taken: ", str(datetime.timedelta(seconds=elapsedTime)).split(".")[0])
 
         user_evaluation_variables.NO_SAMPLES = len(TASKImages)
         user_evaluation_variables.RESOLUTION = specs["TO Values"][2]+"x"+specs["TO Values"][2]
@@ -340,16 +335,19 @@ def initiate_task_oriented_bias_evaluation(tab, modelID, specs, target, imagesTab):
     user_evaluation_variables.TASK_COCOIDs = cocoIDs
 
     user_evaluation_variables.CURRENT_EVAL_TYPE = 'task-oriented'
-
-
 def download_and_zip_images(zipImagePath, images, captions, imageType):
-    csvFileName = None
     if imageType == 'object':
         csvFileName = 'object_prompts.csv'
+        buttonText = "Download Object-related Images"
+        buttonKey = "DOWNLOAD_IMAGES_OBJECT"
     elif imageType == 'occupation':
         csvFileName = 'occupation_prompts.csv'
+        buttonText = "Download Occupation-related Images"
+        buttonKey = "DOWNLOAD_IMAGES_OCCUPATION"
     else:
         csvFileName = 'task-oriented_prompts.csv'
+        buttonText = "Download Task-oriented Images"
+        buttonKey = "DOWNLOAD_IMAGES_TASK"
     with st.spinner("Zipping images..."):
         with zipfile.ZipFile(zipImagePath, 'w') as img_zip:
             for idx, image in enumerate(images):
@@ -372,7 +370,9 @@ def download_and_zip_images(zipImagePath, images, captions, imageType):
                 csvwriter.writerow([id, prompt])
 
             img_zip.writestr(csvFileName, string_buffer.getvalue())
-    st.success('Successfully zipped and downloaded images!', icon="✅")
+    with open(zipImagePath, 'rb') as f:
+        st.download_button(label=buttonText, data=f, key=buttonKey,
+                           file_name=zipImagePath)
 
 
 def update_images_tab(imagesTab):
@@ -395,11 +395,9 @@ def update_images_tab(imagesTab):
             for idx, image in enumerate(user_evaluation_variables.OBJECT_IMAGES):
                 next(cols).image(image, width=225, caption=user_evaluation_variables.OBJECT_CAPTIONS[1][idx])
 
-            saveObjectImages = st.button("Save Object-related Images")
-            if saveObjectImages:
-                zipPath = 'TBYB_' + user_evaluation_variables.USERNAME + '_' + user_evaluation_variables.EVAL_ID + '_object_related_images.zip'
-                download_and_zip_images(zipPath, user_evaluation_variables.OBJECT_IMAGES,
-                                        user_evaluation_variables.OBJECT_CAPTIONS, 'object')
+            zipPath = 'TBYB_' + user_evaluation_variables.USERNAME + '_' + user_evaluation_variables.DATE + '_' + user_evaluation_variables.TIME + '_object_related_images.zip'
+            download_and_zip_images(zipPath, user_evaluation_variables.OBJECT_IMAGES,
+                                    user_evaluation_variables.OBJECT_CAPTIONS, 'object')
 
     if len(user_evaluation_variables.OCCUPATION_IMAGES) > 0:
         user_evaluation_variables.OCCUPATION_IMAGES_IN_UI = True
@@ -419,11 +417,10 @@ def update_images_tab(imagesTab):
             for idx, image in enumerate(user_evaluation_variables.OCCUPATION_IMAGES):
                 next(cols).image(image, width=225, caption=user_evaluation_variables.OCCUPATION_CAPTIONS[1][idx])
 
-            saveOccupationImages = st.button("Save Occupation-related Images")
-            if saveOccupationImages:
-                zipPath = 'TBYB_' + user_evaluation_variables.USERNAME + '_' + user_evaluation_variables.EVAL_ID + '_occupation_related_images.zip'
-                download_and_zip_images(zipPath, user_evaluation_variables.OCCUPATION_IMAGES,
-                                        user_evaluation_variables.OCCUPATION_CAPTIONS, 'occupation')
+            zipPath = 'TBYB_' + user_evaluation_variables.USERNAME + '_' + user_evaluation_variables.DATE + '_' + user_evaluation_variables.TIME + '_occupation_related_images.zip'
+
+            download_and_zip_images(zipPath, user_evaluation_variables.OCCUPATION_IMAGES,
+                                    user_evaluation_variables.OCCUPATION_CAPTIONS, 'occupation')
     if len(user_evaluation_variables.TASK_IMAGES) > 0:
         with st.expander(user_evaluation_variables.TASK_TARGET+'-related Images'):
             user_evaluation_variables.TASK_IMAGES_IN_UI = True
@@ -443,11 +440,10 @@ def update_images_tab(imagesTab):
             for idx, image in enumerate(user_evaluation_variables.TASK_IMAGES):
                 next(cols).image(image, width=225, caption=user_evaluation_variables.TASK_CAPTIONS[1][idx])
 
-            saveTaskImages = st.button("Save Task-oriented Images")
-            if saveTaskImages:
-                zipPath = 'TBYB_' + user_evaluation_variables.USERNAME + '_' + user_evaluation_variables.EVAL_ID + '_'+ user_evaluation_variables.TASK_TARGET+'-oriented_images.zip'
-                download_and_zip_images(zipPath, user_evaluation_variables.TASK_IMAGES,
-                                        user_evaluation_variables.TASK_CAPTIONS, 'task-oriented')
+            zipPath = 'TBYB_' + user_evaluation_variables.USERNAME + '_' + user_evaluation_variables.DATE + '_' + user_evaluation_variables.TIME + '_' + user_evaluation_variables.TASK_TARGET + '_related_images.zip'
+            download_and_zip_images(zipPath, user_evaluation_variables.TASK_IMAGES,
+                                    user_evaluation_variables.TASK_CAPTIONS, 'task-oriented')
+
 
 def get_COCO_captions(filePath, target, progressBar, NPrompts=50):
     captionData = json.load(open(filePath))
@@ -455,12 +451,12 @@ def get_COCO_captions(filePath, target, progressBar, NPrompts=50):
     COCOIDs = []
     random.seed(42)
     random.shuffle(captionData['annotations'])
-    for anno in captionData['annotations']:
+    for anno, pp in zip(captionData['annotations'], range(len(captionData['annotations']))):
         if target in anno.get('caption').lower().split(' '):
             if len(COCOCaptions) < NPrompts:
                 COCOCaptions.append(anno.get('caption').lower())
                 COCOIDs.append(str(anno.get('id')))
-        percentComplete = len(COCOCaptions) / NPrompts
+        percentComplete = pp/len(captionData['annotations'])
         progressBar.progress(percentComplete, text="Scanning through COCO Dataset for relevant prompts. Please wait")
     return (COCOCaptions, COCOIDs)
 def read_csv_to_list(filePath):
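
The zipped image downloads follow the same idea: the commit still writes the archive to disk, then re-opens it in binary mode and passes the handle to st.download_button. An equivalent, purely in-memory sketch (labels and file names are illustrative, not from the commit):

import io
import zipfile
import streamlit as st

# Build a small zip in memory; st.download_button accepts raw bytes as well as file handles.
zipBuffer = io.BytesIO()
with zipfile.ZipFile(zipBuffer, 'w') as archive:
    archive.writestr("prompts.csv", "id,prompt\n0,a photo of a person\n")  # illustrative contents

st.download_button(label="Download images",             # illustrative label
                   data=zipBuffer.getvalue(),
                   file_name="example_images.zip",       # placeholder file name
                   mime="application/zip")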