sasha (HF staff) committed
Commit 4842425 · 1 Parent(s): fe29958

Update app.py


adding headers

Files changed (1): app.py (+16, -4)

app.py CHANGED
@@ -22,7 +22,9 @@ with gr.Blocks() as demo:
     <p style="margin-bottom: 14px; font-size: 100%"> As AI-enabled Text-to-Image models are becoming increasingly used, characterizing the social biases they exhibit is a necessary first step to lowering their risk of discriminatory outcomes. <br> We compare three such models: <b> Stable Diffusion v.1.4, Stable Diffusion v.2. </b>, and <b> Dall-E 2 </b>, prompting them to produce images of different <i> professions </i> and <i> identity characteristics </i>. <br> Since artificial depictions of fictive humans have no inherent gender or ethnicity nor do they belong to socially-constructed groups, we pursued our analysis <i> without </i> ascribing gender and ethnicity categories to the images generated, still finding clear evidence of ethnicity and gender biases. You can explore these findings in the sections below: </p>
     ''')
 
-
+    gr.Markdown("""
+    ## Identity group results (ethnicity and gender)
+    """)
 
     with gr.Accordion("Identity group results (ethnicity and gender)", open=False):
         gr.HTML('''
@@ -38,7 +40,10 @@ with gr.Blocks() as demo:
         gr.HTML('''
         <p style="margin-bottom: 14px; font-size: 100%"> You can see that the models reflect many societal biases -- for instance representing Native Americans wearing traditional headdresses, non-binary people with stereotypical haircuts and glasses, and East Asian men with features that amplify ethnic stereotypes. <br> This is problematic because it reinforces existing cultural stereotypes and fails to represent the diversity that is present in all identity groups.</p>
         ''')
-
+    gr.Markdown("""
+    ## Bias Exploration
+    """)
+
 
     with gr.Accordion("Bias Exploration", open=False):
         gr.HTML('''
@@ -50,7 +55,7 @@ with gr.Blocks() as demo:
             <p style="margin-bottom: 14px; font-size: 100%"> Using the <b><a href='https://huggingface.co/spaces/society-ethics/DiffusionClustering' style='text-decoration: underline;' target='_blank'> Diffusion Cluster Explorer</a></b>, we can see that the top cluster for the CEO and director professions is <b> Cluster 4</b>: </p> ''')
         with gr.Column():
            ceo_img = gr.Image(Image.open("images/bias/ceo_dir.png"), label = "CEO Image", show_label=False)
-
+
        with gr.Row():
            with gr.Column():
                gr.HTML('''
@@ -70,6 +75,10 @@ with gr.Blocks() as demo:
            with gr.Column(scale=2):
                cluster4 = gr.Image(Image.open("images/bias/Cluster2.png"), label = "Cluster 2 Image", show_label=False)
 
+    gr.Markdown("""
+    ## Comparing model generations
+    """)
+
     with gr.Accordion("Comparing model generations", open=False):
         gr.HTML('''
         <p style="margin-bottom: 14px; font-size: 100%"> One of the goals of our study was allowing users to compare model generations across professions in an open-ended way, uncovering patterns and trends on their own. This is why we created the <a href='https://huggingface.co/spaces/society-ethics/DiffusionBiasExplorer' style='text-decoration: underline;' target='_blank'> Diffusion Bias Explorer </a> and the <a href='https://huggingface.co/spaces/society-ethics/Average_diffusion_faces' style='text-decoration: underline;' target='_blank'> Average Diffusion Faces </a> tools. We show some of their functionalities below: </p> ''')
@@ -90,6 +99,9 @@ with gr.Blocks() as demo:
         gr.HTML('''
         <p style="margin-bottom: 14px; font-size: 100%"> Looking at the average faces for a given profession across multiple models can help see the dominant characteristics of that profession, as well as how much variation there is (based on how fuzzy the image is). <br> In the images shown here, we can see that representations of these professions significantly differ across the three models, while sharing common characteristics, e.g. <i> postal workers </i> all wear caps. <br> Also, the average faces of <i> hairdressers </i> seem more fuzzy than the other professions, indicating a higher diversity among the generations compared to other professions. <br> Look at the <a href='https://huggingface.co/spaces/society-ethics/Average_diffusion_faces' style='text-decoration: underline;' target='_blank'> Average Diffusion Faces </a> tool for more examples! </p>''')
 
+    gr.Markdown("""
+    ## Exploring the Pixel Space of Generated Images
+    """)
     with gr.Accordion("Exploring the pixel space of generated images", open=False):
         gr.HTML('''
         <br>
@@ -104,7 +116,7 @@ with gr.Blocks() as demo:
     ### All of the tools created as part of this project:
     """)
     gr.HTML('''
-    <p style="margin-bottom: 10px; font-size: 94%">
+    <p style="margin-bottom: 10px; font-size: 100%">
     <a href='https://huggingface.co/spaces/society-ethics/Average_diffusion_faces' style='text-decoration: underline;' target='_blank'> Average Diffusion Faces </a> <br>
     <a href='https://huggingface.co/spaces/society-ethics/DiffusionBiasExplorer' style='text-decoration: underline;' target='_blank'> Diffusion Bias Explorer </a> <br>
     <a href='https://huggingface.co/spaces/society-ethics/DiffusionClustering' style='text-decoration: underline;' target='_blank'> Diffusion Cluster Explorer </a> <br>
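For context, the pattern this commit repeats is simple: a `gr.Markdown` heading is inserted directly above each collapsed `gr.Accordion`, so every section title stays visible even while the accordion is closed. Below is a minimal, self-contained sketch of that pattern; the placeholder intro/section text and the `demo.launch()` boilerplate are illustrative assumptions, not part of the diff.

```python
import gradio as gr

# Section titles mirrored from the accordions touched by this commit.
SECTIONS = [
    "Identity group results (ethnicity and gender)",
    "Bias Exploration",
    "Comparing model generations",
    "Exploring the pixel space of generated images",
]

with gr.Blocks() as demo:
    # Placeholder intro; the real app renders a longer gr.HTML block here.
    gr.HTML('<p style="font-size: 100%">Intro text goes here.</p>')

    for title in SECTIONS:
        # The change this commit makes: a visible Markdown heading above each
        # accordion, so the page can be scanned without expanding every section.
        gr.Markdown(f"## {title}")
        with gr.Accordion(title, open=False):
            # Placeholder body; the real app shows gr.HTML text and gr.Image panels.
            gr.HTML('<p style="font-size: 100%">Section content goes here.</p>')

if __name__ == "__main__":
    demo.launch()
```

Because the accordions default to `open=False`, the standalone headings act as a table-of-contents-like overview; opening a section is still required to see its images and explanatory text.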