Updated for V2.0
tab_manager.py (+29 -20)
@@ -7,12 +7,14 @@ import pandas as pd
 import numpy as np
 import json
 import csv
+import string
 from itertools import cycle
 import random
 import time
 import datetime
 import zipfile
 from io import BytesIO, StringIO
+
 def completed_setup(tabs, modelID):
     with tabs[0]:
         st.write("\U0001F917 ", modelID, " has been loaded!")
@@ -29,13 +31,15 @@ def completed_setup(tabs, modelID):
         general_bias_eval_setup(tabs[0], modelID, tabs[3])
     with tabs[1]:
         task_oriented_bias_eval_setup(tabs[1],modelID, tabs[3])
+
 def general_bias_eval_setup(tab, modelID, imagesTab):

     generalBiasSetupDF_EVAL = pd.DataFrame(
         {
             "GEN Eval. Variable": ["No. Images to Generate per prompt", "No. Inference Steps",
-                                   "Image
-
+                                   "Image Height - must be a value that is 2 to the power of N",
+                                   "Image Width - must be a value that is 2 to the power of N"],
+            "GEN Values": ["2", "10", "512", "512"],
         }
     )
     generalBiasSetupDF_TYPE = pd.DataFrame(
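Note: the new "GEN Values" defaults are strings ("2", "10", "512", "512"), not integers, so the validation further down relies on str.isnumeric() before casting with int(...). A minimal standalone sketch of that pattern, with a plain list standing in for the edited GENValTable["GEN Values"] column:

    # Stand-in for the table column; the data editor hands values back as strings.
    values = ["2", "10", "512", "512"]

    if not all(v.isnumeric() for v in values):
        print("Please enter numeric values")  # st.error in the app
    else:
        num_images, num_steps, height, width = (int(v) for v in values)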
@@ -84,13 +88,13 @@ def general_bias_eval_setup(tab, modelID, imagesTab):
         )
         st.info('Image sizes vary for each model but is generally one of [256, 512, 1024, 2048]. We found that for some models '
                 'lower image resolutions resulted in noise outputs (you are more than welcome to experiment with this). '
-                'Consult the model card if you are unsure what image resolution to use.
-
-
+                'Consult the model card if you are unsure what image resolution to use.', icon="ℹ️")
+        if not all([GENValTable["GEN Values"][0].isnumeric(), GENValTable["GEN Values"][1].isnumeric(),
+                    GENValTable["GEN Values"][2].isnumeric(), GENValTable["GEN Values"][3].isnumeric()]):
             st.error('Looks like you have entered non-numeric values! '
                      'Please enter numeric values in the table above', icon="🚨")
         # elif not all([check_for_power_of_two(int(GENValTable["GEN Values"][2])), int(GENValTable["GEN Values"][2]) >= 8]):
-        elif int(GENValTable["GEN Values"][2]) < 8:
+        elif any(int(GENValTable["GEN Values"][2]), int(GENValTable["GEN Values"][3])) < 8:
             st.error('Please ensure that your image resolution is 1 number greater than 8. Consult the model card to find the size of the images used'
                      ' to train the model. Incompatible image resolutions may result in noisy output images', icon="🚨")
         else:
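Note: any() takes a single iterable, so the new check `any(int(GENValTable["GEN Values"][2]), int(GENValTable["GEN Values"][3])) < 8` raises TypeError: any() takes exactly one argument (2 given) as soon as that branch is evaluated. Assuming the intent is to flag an error when either dimension is below 8, a generator expression expresses it; a sketch with stand-in values:

    height, width = 512, 512  # stand-ins for GEN Values indices 2 and 3

    # Intended condition: true when either dimension is below 8.
    if any(dim < 8 for dim in (height, width)):
        print("image resolution too small")  # st.error in the app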
@@ -123,8 +127,9 @@ def task_oriented_bias_eval_setup(tab, modelID, imagesTab):
     biasSetupDF_EVAL = pd.DataFrame(
         {
             "TO Eval. Variable": ["No. Images to Generate per prompt", "No. Inference Steps",
-
-
+                                  "Image Height - must be a value that is 2 to the power of N",
+                                  "Image Width - must be a value that is 2 to the power of N"],
+            "TO Values": ["2", "10", "512", "512"],
         }
     )
     with tab:
@@ -150,16 +155,16 @@ def task_oriented_bias_eval_setup(tab, modelID, imagesTab):
             num_rows="fixed",
         )
         st.info('Image sizes vary for each model but is generally one of [256, 512, 1024, 2048]. We found that for some models '
-
-
-                'image sizes will be supported soon \U0001F601.', icon="ℹ️")
+                'lower image resolutions resulted in noise outputs (you are more than welcome to experiment with this). '
+                'Consult the model card if you are unsure what image resolution to use.', icon="ℹ️")
         target = st.text_input('What is the single-token target of your task-oriented evaluation study '
                                'e.g.: "burger", "coffee", "men", "women"')

-        if not all([TOValTable["TO Values"][0].isnumeric(), TOValTable["TO Values"][1].isnumeric(),
+        if not all([TOValTable["TO Values"][0].isnumeric(), TOValTable["TO Values"][1].isnumeric(),
+                    TOValTable["TO Values"][2].isnumeric(), TOValTable["TO Values"][3].isnumeric()]):
             st.error('Looks like you have entered non-numeric values! '
                      'Please enter numeric values in the table above', icon="🚨")
-        elif int(TOValTable["TO Values"][2]) < 8:
+        elif any(int(TOValTable["TO Values"][2]), int(TOValTable["TO Values"][3])) < 8:
             st.error('Please ensure that your image resolution is 1 number greater than 8. Consult the model card to find the size of the images used'
                      ' to train the model. Incompatible image resolutions may result in noisy output images', icon="🚨")
         else:
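Note: the task-oriented branch repeats the same pattern, so `elif any(int(TOValTable["TO Values"][2]), int(TOValTable["TO Values"][3])) < 8:` has the identical TypeError problem; the generator-expression form sketched above applies here unchanged.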
@@ -236,7 +241,7 @@ def initiate_general_bias_evaluation(tab, modelID, specs, imagesTab):
     with infoColumn1:
         st.write(" ***No. Images per prompt*** = ", specs[0]["GEN Values"][0])
         st.write(" ***No. Steps*** = ", specs[0]["GEN Values"][1])
-        st.write(" ***Image Size*** = ", specs[0]["GEN Values"][2], "$\\times$", specs[0]["GEN Values"][
+        st.write(" ***Image Size*** = ", specs[0]["GEN Values"][2], "$\\times$", specs[0]["GEN Values"][3])
     with infoColumn2:
         st.write(" ***Objects*** = ", specs[1]["Check"][0])
         st.write(" ***Objects and Actions*** = ", specs[1]["Check"][1])
@@ -273,7 +278,8 @@ def initiate_general_bias_evaluation(tab, modelID, specs, imagesTab):
         OBJECTprogressBar = st.progress(0, text="Generating Object-related images. Please wait.")
         objectImages, objectCaptions = MINFER.generate_test_images(OBJECTprogressBar, "Generating Object-related images. Please wait.",
                                                                    objectPrompts, int(specs[0]["GEN Values"][0]),
-                                                                   int(specs[0]["GEN Values"][1]), int(specs[0]["GEN Values"][2])
+                                                                   int(specs[0]["GEN Values"][1]), int(specs[0]["GEN Values"][2]),
+                                                                   int(specs[0]["GEN Values"][3]))
         evaluationImages+=objectImages
         evaluationCaptions+=objectCaptions[0]
         TXTObjectPrompts = ""
@@ -282,7 +288,8 @@ def initiate_general_bias_evaluation(tab, modelID, specs, imagesTab):
         OCCprogressBar = st.progress(0, text="Generating Occupation-related images. Please wait.")
         occupationImages, occupationCaptions = MINFER.generate_test_images(OCCprogressBar, "Generating Occupation-related images. Please wait.",
                                                                            occupationPrompts, int(specs[0]["GEN Values"][0]),
-                                                                           int(specs[0]["GEN Values"][1]), int(specs[0]["GEN Values"][2])
+                                                                           int(specs[0]["GEN Values"][1]), int(specs[0]["GEN Values"][2]),
+                                                                           int(specs[0]["GEN Values"][3]))
         evaluationImages += occupationImages
         evaluationCaptions += occupationCaptions[0]

@@ -301,7 +308,7 @@ def initiate_general_bias_evaluation(tab, modelID, specs, imagesTab):
     user_evaluation_variables.DIST_BIAS = float(f"{user_evaluation_variables.EVAL_METRICS[2]:.4f}")
     user_evaluation_variables.HALLUCINATION = float(f"{np.mean(user_evaluation_variables.EVAL_METRICS[3]):.4f}")
     user_evaluation_variables.MISS_RATE = float(f"{np.mean(user_evaluation_variables.EVAL_METRICS[4]):.4f}")
-    user_evaluation_variables.EVAL_ID =
+    user_evaluation_variables.EVAL_ID = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
     user_evaluation_variables.DATE = datetime.datetime.utcnow().strftime('%d-%m-%Y')
     user_evaluation_variables.TIME = datetime.datetime.utcnow().strftime('%H:%M:%S')
     user_evaluation_variables.RUN_TIME = str(datetime.timedelta(seconds=elapsedTime)).split(".")[0]
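Note: EVAL_ID is now a random 16-character alphanumeric tag, which is what the new `import string` supports. random.choices samples with replacement from the 62-character alphabet, so IDs are unlikely to collide but not guaranteed unique. A standalone sketch:

    import random
    import string

    # 16 draws (with replacement) from a-z, A-Z, 0-9, e.g. 'kT9x2QbLw0ZsMcVe'.
    eval_id = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
    print(eval_id)

If collision resistance ever matters, secrets.token_hex(8) or uuid.uuid4().hex[:16] are drop-in replacements of the same length.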
@@ -323,7 +330,7 @@ def initiate_task_oriented_bias_evaluation(tab, modelID, specs, target, imagesTab):
     infoColumn1, infoColumn2 = st.columns(2)
     st.write(" ***No. Images per prompt*** = ", specs["TO Values"][0])
     st.write(" ***No. Steps*** = ", specs["TO Values"][1])
-    st.write(" ***Image Size*** = ", specs["TO Values"][2], "$\\times$", specs["TO Values"][
+    st.write(" ***Image Size*** = ", specs["TO Values"][2], "$\\times$", specs["TO Values"][3])
     st.write(" ***Target*** = ", target.lower())
     st.markdown("___")

@@ -353,7 +360,8 @@ def initiate_task_oriented_bias_evaluation(tab, modelID, specs, target, imagesTab):
         TASKprogressBar = st.progress(0, text="Generating Task-oriented images. Please wait.")
         TASKImages, TASKCaptions = MINFER.generate_task_oriented_images(TASKprogressBar,"Generating Task-oriented images. Please wait.",
                                                                         prompts, cocoIDs, int(specs["TO Values"][0]),
-                                                                        int(specs["TO Values"][1]), int(specs["TO Values"][2])
+                                                                        int(specs["TO Values"][1]), int(specs["TO Values"][2]),
+                                                                        int(specs["TO Values"][3]))

         EVALprogressBar = st.progress(0, text="Evaluating " + modelID + " Model Images. Please wait.")
         user_evaluation_variables.EVAL_METRICS = GBM.evaluate_t2i_model_images(TASKImages, TASKCaptions[0], EVALprogressBar, False, "TASK")
@@ -367,7 +375,7 @@ def initiate_task_oriented_bias_evaluation(tab, modelID, specs, target, imagesTab):
     user_evaluation_variables.HALLUCINATION = float(f"{np.mean(user_evaluation_variables.EVAL_METRICS[3]):.4f}")
     user_evaluation_variables.MISS_RATE = float(f"{np.mean(user_evaluation_variables.EVAL_METRICS[4]):.4f}")
     user_evaluation_variables.TASK_TARGET = target.lower()
-    user_evaluation_variables.EVAL_ID =
+    user_evaluation_variables.EVAL_ID = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
     user_evaluation_variables.DATE = datetime.datetime.utcnow().strftime('%d-%m-%Y')
     user_evaluation_variables.TIME = datetime.datetime.utcnow().strftime('%H:%M:%S')
     user_evaluation_variables.RUN_TIME = str(datetime.timedelta(seconds=elapsedTime)).split(".")[0]
@@ -377,6 +385,7 @@ def initiate_task_oriented_bias_evaluation(tab, modelID, specs, target, imagesTab):
     user_evaluation_variables.TASK_COCOIDs = cocoIDs

     user_evaluation_variables.CURRENT_EVAL_TYPE = 'task-oriented'
+
 def download_and_zip_images(zipImagePath, images, captions, imageType):
     if imageType == 'object':
         csvFileName = 'object_prompts.csv'
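Note: the commented-out check (old line 92 / new line 96) references a check_for_power_of_two helper that is not part of this diff, and the new table rows describe height and width as "a value that is 2 to the power of N". If that stricter check is ever re-enabled, the standard bit trick implements it; the body below is a sketch under that assumption, not necessarily the repo's implementation:

    def check_for_power_of_two(n: int) -> bool:
        # A positive integer is a power of two iff exactly one bit is set:
        # 512 == 0b1000000000, and 512 & 511 == 0.
        return n > 0 and (n & (n - 1)) == 0

    assert check_for_power_of_two(512)
    assert not check_for_power_of_two(768)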