Update general_bias_measurement.py
general_bias_measurement.py CHANGED

```diff
@@ -206,7 +206,7 @@ def evaluate_t2i_model_images(images, prompts, progressBar, debugging, evalType)
     normalisedDistribution, B_D = calculate_distribution_bias(list(sortedDistributionBiasDict.values()))
 
     return (sortedDistributionBiasDict, normalisedDistribution, B_D, hallucinationBiases, CLIPMissRates, CLIPErrors)
-def output_eval_results(metrics, topX, evalType):
+def output_eval_results(metrics, evalID, topX, evalType):
     sortedDistributionBiasList = list(metrics[0].items())
     th_props = [
         ('font-size', '16px'),
@@ -238,6 +238,8 @@ def output_eval_results(metrics, topX, evalType):
         st.header("\U0001F30E General Bias Evaluation Results")
     else:
         st.header("\U0001F3AF Task-Oriented Bias Evaluation Results")
+
+    st.write("**Evaluation ID**:\t", evalID)
 
     st.table(pd.DataFrame([["Distribution Bias",metrics[2]],["Jaccard Hallucination", np.mean(metrics[3])],
                            ["Generative Miss Rate", np.mean(metrics[4])]],
```
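The signature change is breaking for callers: `output_eval_results` now takes an `evalID` as its second positional argument and prints it above the results table via `st.write`. A minimal call-site sketch, assuming `general_bias_measurement.py` is importable as a module and that any unique string is acceptable as the ID (the diff does not show how the Space actually generates it); `images`, `prompts`, `progressBar`, `debugging`, `evalType`, and `topX` stand for values produced elsewhere in the Streamlit app:

```python
import uuid

from general_bias_measurement import evaluate_t2i_model_images, output_eval_results

# Assumption: any unique run identifier works here; the Space's real ID
# scheme is not shown in this diff.
evalID = uuid.uuid4().hex[:8]

# images/prompts/progressBar/debugging/evalType/topX are placeholders for
# values the surrounding app supplies before this point.
metrics = evaluate_t2i_model_images(images, prompts, progressBar, debugging, evalType)
output_eval_results(metrics, evalID, topX, evalType)  # evalID is now the second argument
```

Threading the ID through the function rather than reading it from global state keeps `output_eval_results` reusable across runs; the added `st.write` simply labels the rendered results with the run they belong to.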