DamonDemon committed
Commit 6fab635
1 Parent(s): aa32379

refine func

Files changed (2)
  1. app.py +20 -33
  2. src/display/about.py +5 -3
app.py CHANGED
@@ -25,14 +25,6 @@ from src.display.utils import (
 )
 from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, H4_TOKEN, IS_PUBLIC, QUEUE_REPO, REPO_ID, RESULTS_REPO
 from PIL import Image
-# from src.populate import get_evaluation_queue_df, get_leaderboard_df
-# from src.submission.submit import add_new_eval
-# from src.tools.collections import update_collections
-# from src.tools.plots import (
-#     create_metric_plot_obj,
-#     create_plot_df,
-#     create_scores_df,
-# )
 from dummydatagen import dummy_data_for_plot, create_metric_plot_obj_1, dummydf
 import copy
@@ -40,49 +32,45 @@ import copy
 def restart_space():
     API.restart_space(repo_id=REPO_ID, token=H4_TOKEN)
 
-
-
 gtbench_raw_data = dummydf()
 methods = list(set(gtbench_raw_data['Method']))
 
-
-
-
 # Searching and filtering
-
+metrics = ["Style-UA", "Style-IRA", "Style-CRA", "Object-UA", "Object-IRA", "Object-CRA", "FID", "run-time", "storage", "memory"]
 
 def update_table(
     hidden_df: pd.DataFrame,
-    columns: list,
+    columns_1: list,
+    columns_2: list,
+    columns_3: list,
     model1: list,
 ):
 
-    filtered_df = select_columns(hidden_df, columns)
+    filtered_df = select_columns(hidden_df, columns_1, columns_2, columns_3)
 
     filtered_df = filter_model1(filtered_df, model1)
 
     return filtered_df
 
 
-def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
+def select_columns(df: pd.DataFrame, columns_1: list, columns_2: list, columns_3: list) -> pd.DataFrame:
     always_here_cols = [
-        "Model", "Agent", "Opponent Model", "Opponent Agent"
+        "Method"
     ]
     # We use COLS to maintain sorting
     all_columns = metrics
 
-    if len(columns) == 0:
+    if (len(columns_1) + len(columns_2) + len(columns_3)) == 0:
         filtered_df = df[
             always_here_cols +
             [c for c in all_columns if c in df.columns]
         ]
 
-        return filtered_df
-
-    filtered_df = df[
-        always_here_cols +
-        [c for c in all_columns if c in df.columns and c in columns]
-    ]
+    else:
+        filtered_df = df[
+            always_here_cols +
+            [c for c in all_columns if c in df.columns and (c in columns_1 or c in columns_2 or c in columns_3)]
+        ]
 
     return filtered_df
@@ -101,9 +89,6 @@ def filter_model1(
 
 
 
-metrics = ["Style-UA", "Style-IRA", "Style-CRA", "Object-UA", "Object-IRA", "Object-CRA", "FID", "run-time", "storage", "memory"]
-
-
 demo = gr.Blocks(css=custom_css)
@@ -135,7 +120,7 @@ with demo:
         )
 
         with gr.Row():
-            shown_columns_3 = gr.CheckboxGroup(
+            shown_columns_2 = gr.CheckboxGroup(
                 choices=["FID"],
                 label="Image Quality",
                 elem_id="column-select",
@@ -143,7 +128,7 @@ with demo:
         )
 
        with gr.Row():
-            shown_columns_4 = gr.CheckboxGroup(
+            shown_columns_3 = gr.CheckboxGroup(
                 choices=["Time (s)", "Memory (GB)", "Storage (GB)"],
                 label="Resource Costs",
                 elem_id="column-select",
@@ -168,13 +153,15 @@ with demo:
     )
 
 
-    for selector in [model1_column]:
+    for selector in [shown_columns_1, shown_columns_2, shown_columns_3, model1_column]:
         selector.change(
             update_table,
             [
-                model1_column,
                 game_bench_df_for_search,
-
+                shown_columns_1,
+                shown_columns_2,
+                shown_columns_3,
+                model1_column,
             ],
             leaderboard_table,
             queue=True,
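
For reference, below is a minimal standalone sketch of the refined column-selection behavior introduced by this commit. The selection logic mirrors the new select_columns; the toy Method rows and metric values are illustrative only and are not taken from the repository's data.

import pandas as pd

# Metric columns used by the leaderboard after this commit.
metrics = ["Style-UA", "Style-IRA", "Style-CRA", "Object-UA", "Object-IRA",
           "Object-CRA", "FID", "run-time", "storage", "memory"]

def select_columns(df: pd.DataFrame, columns_1: list, columns_2: list, columns_3: list) -> pd.DataFrame:
    # "Method" is always shown; the three lists come from the three checkbox groups.
    always_here_cols = ["Method"]
    if (len(columns_1) + len(columns_2) + len(columns_3)) == 0:
        # Nothing ticked: show every metric column present in the dataframe.
        return df[always_here_cols + [c for c in metrics if c in df.columns]]
    # Otherwise keep only the metrics ticked in any of the three groups.
    return df[always_here_cols + [c for c in metrics
                                  if c in df.columns and (c in columns_1 or c in columns_2 or c in columns_3)]]

# Illustrative data only (values are made up):
df = pd.DataFrame({"Method": ["ESD", "FMN"], "FID": [75.1, 80.2], "Style-UA": [0.98, 0.88]})
print(select_columns(df, [], [], ["FID"]))   # -> Method and FID only
print(select_columns(df, [], [], []))        # -> Method plus all available metrics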
 
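
A condensed, hypothetical sketch of the event wiring in the last hunk: each checkbox group's change event re-runs update_table against the hidden dataframe and writes the result back to the visible leaderboard. Component names, choices, and data here are simplified placeholders, not the app's actual layout.

import gradio as gr
import pandas as pd

data = pd.DataFrame({"Method": ["ESD", "FMN"], "FID": [75.1, 80.2]})

def update_table(df: pd.DataFrame, picked: list) -> pd.DataFrame:
    # Same pattern as above: no selection -> all columns, else Method + picked metrics.
    return df if not picked else df[["Method"] + picked]

with gr.Blocks() as demo:
    hidden_df = gr.Dataframe(value=data, visible=False)   # full data kept out of view
    quality_box = gr.CheckboxGroup(choices=["FID"], label="Image Quality")
    leaderboard_table = gr.Dataframe(value=data)           # what the user sees

    # As in the commit, the listener passes the hidden dataframe and the checkbox
    # state to update_table and refreshes the leaderboard table.
    quality_box.change(update_table, [hidden_df, quality_box], leaderboard_table, queue=True)

demo.launch()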
src/display/about.py CHANGED
@@ -7,11 +7,13 @@ INTRODUCTION_TEXT = """
 
 paper: https://arxiv.org/abs/2402.11846
 
-The rapid advancement of diffusion models (DMs) has not only transformed various real- world industries but has also introduced negative societal concerns, including the generation of harmful content, copyright disputes, and the rise of stereotypes and biases. To mitigate these issues, machine unlearning (MU) has emerged as a potential solution, demonstrating its ability to remove undesired generative capabilities of DMs in various applications. However, by examining existing MU evaluation methods, we uncover several key challenges that can result in incomplete, inaccurate, or biased evaluations for MU in DMs.
+Code: https://github.com/OPTML-Group/UnlearnCanvas
 
-To address them, we enhance the evaluation metrics for MU, including the introduction of an often-overlooked retainability measurement for DMs post-unlearning. Additionally, we introduce UnlearnCanvas, a comprehensive high-resolution stylized image dataset that facilitates us to evaluate the unlearning of artistic painting styles in conjunction with associated image objects.
+The rapid advancement of diffusion models (DMs) has not only transformed various real- world industries but has also introduced negative societal concerns, including the generation of harmful content, copyright disputes, and the rise of stereotypes and biases. <strong>To mitigate these issues, machine unlearning (MU) has emerged as a potential solution, demonstrating its ability to remove undesired generative capabilities of DMs in various applications.</strong> However, by examining existing MU evaluation methods, we uncover several key challenges that can result in incomplete, inaccurate, or biased evaluations for MU in DMs.
 
-We show that this dataset plays a pivotal role in establishing a standardized and automated evaluation framework for MU techniques on DMs, featuring 7 quantitative metrics to address various aspects of unlearning effectiveness. Through extensive experiments, we benchmark 5 state-of- the-art MU methods, revealing novel insights into their pros and cons, and the underlying unlearning mechanisms. Furthermore, we demonstrate the potential of UnlearnCanvas to benchmark other generative modeling tasks, such as style transfer. The UnlearnCanvas dataset, benchmark, and the codes to reproduce all the results in this work can be found at https://github.com/OPTML-Group/UnlearnCanvas.
+To address them, we enhance the evaluation metrics for MU, including the introduction of an often-overlooked retainability measurement for DMs post-unlearning. Additionally, we introduce <strong>UnlearnCanvas, a comprehensive high-resolution stylized image dataset</strong> that facilitates us to evaluate the unlearning of artistic painting styles in conjunction with associated image objects.
+
+We show that this dataset plays a pivotal role in establishing a standardized and automated evaluation framework for MU techniques on DMs, featuring 7 quantitative metrics to address various aspects of unlearning effectiveness. Through extensive experiments, we benchmark 5 state-of- the-art MU methods, revealing novel insights into their pros and cons, and the underlying unlearning mechanisms. Furthermore, we demonstrate the potential of UnlearnCanvas to benchmark other generative modeling tasks, such as style transfer.
 
 """