rapadilla committed
Commit 65cfe9e • 1 Parent(s): 7022444
Files changed (1)
  1. app.py +102 -57
app.py CHANGED
@@ -1,73 +1,93 @@
 import gradio as gr
 import pandas as pd
 import json
-from constants import BANNER, INTRODUCTION_TEXT, CITATION_TEXT, METRICS_TAB_TEXT, DIR_OUTPUT_REQUESTS
+from constants import (
+    BANNER,
+    INTRODUCTION_TEXT,
+    CITATION_TEXT,
+    METRICS_TAB_TEXT,
+    DIR_OUTPUT_REQUESTS,
+)
 from init import is_model_on_hub, upload_file, load_all_info_from_dataset_hub
-from utils_display import AutoEvalColumn, fields, make_clickable_model, styled_error, styled_message
+from utils_display import (
+    AutoEvalColumn,
+    fields,
+    make_clickable_model,
+    styled_error,
+    styled_message,
+)
 from datetime import datetime, timezone
 
 LAST_UPDATED = "September, 7th 2023"
 GPU_MODEL = "NVIDIA Tesla M60"
 
-column_names = {"model": "model",
-                "AP-IoU=0.50:0.95-area=all-maxDets=100": "AP",
-                "AP-IoU=0.50-area=all-maxDets=100": "AP@.50",
-                "AP-IoU=0.75-area=all-maxDets=100": "AP@.75",
-                "AP-IoU=0.50:0.95-area=small-maxDets=100" : "AP-S",
-                "AP-IoU=0.50:0.95-area=medium-maxDets=100": "AP-M",
-                "AP-IoU=0.50:0.95-area=large-maxDets=100": "AP-L",
-                "AR-IoU=0.50:0.95-area=all-maxDets=1": "AR1",
-                "AR-IoU=0.50:0.95-area=all-maxDets=10": "AR10",
-                "AR-IoU=0.50:0.95-area=all-maxDets=100": "AR100",
-                "AR-IoU=0.50:0.95-area=small-maxDets=100": "AR-S",
-                "AR-IoU=0.50:0.95-area=medium-maxDets=100": "AR-M",
-                "AR-IoU=0.50:0.95-area=large-maxDets=100": "AR-L",
-                "estimated_fps": "FPS(*)",
-                "hub_license": "hub license",
-                }
+column_names = {
+    "model": "model",
+    "AP-IoU=0.50:0.95-area=all-maxDets=100": "AP",
+    "AP-IoU=0.50-area=all-maxDets=100": "AP@.50",
+    "AP-IoU=0.75-area=all-maxDets=100": "AP@.75",
+    "AP-IoU=0.50:0.95-area=small-maxDets=100": "AP-S",
+    "AP-IoU=0.50:0.95-area=medium-maxDets=100": "AP-M",
+    "AP-IoU=0.50:0.95-area=large-maxDets=100": "AP-L",
+    "AR-IoU=0.50:0.95-area=all-maxDets=1": "AR1",
+    "AR-IoU=0.50:0.95-area=all-maxDets=10": "AR10",
+    "AR-IoU=0.50:0.95-area=all-maxDets=100": "AR100",
+    "AR-IoU=0.50:0.95-area=small-maxDets=100": "AR-S",
+    "AR-IoU=0.50:0.95-area=medium-maxDets=100": "AR-M",
+    "AR-IoU=0.50:0.95-area=large-maxDets=100": "AR-L",
+    "estimated_fps": "FPS(*)",
+    "hub_license": "hub license",
+}
 
 eval_queue_repo, requested_models, csv_results = load_all_info_from_dataset_hub()
 
 if not csv_results.exists():
     raise Exception(f"CSV file {csv_results} does not exist locally")
-
+
 # Get csv with data and parse columns
 original_df = pd.read_csv(csv_results)
 lst_evaluated_models = original_df["model"].tolist()
 lst_evaluated_models = list(map(str.lower, lst_evaluated_models))
 
+
 # Formats the columns
 def decimal_formatter(x):
     x = "{:.2f}".format(x)
     return x
 
+
 def perc_formatter(x):
     x = "{:.2%}".format(x)
     while len(x) < 6:
         x = f"0{x}"
     return x
 
+
 # Drop columns not specified in dictionary
 cols_to_drop = [col for col in original_df.columns if col not in column_names]
 original_df.drop(cols_to_drop, axis=1, inplace=True)
 
 for col in original_df.columns:
     if col == "model":
-        original_df[col] = original_df[col].apply(lambda x: x.replace(x, make_clickable_model(x)))
+        original_df[col] = original_df[col].apply(
+            lambda x: x.replace(x, make_clickable_model(x))
+        )
     elif col == "estimated_fps":
-        original_df[col] = original_df[col].apply(decimal_formatter) # For decimal values
+        original_df[col] = original_df[col].apply(
+            decimal_formatter
+        )  # For decimal values
     elif col == "hub_license":
         continue
     else:
-        original_df[col] = original_df[col].apply(perc_formatter) # For % values
-
+        original_df[col] = original_df[col].apply(perc_formatter)  # For % values
+
 original_df.rename(columns=column_names, inplace=True)
 
 COLS = [c.name for c in fields(AutoEvalColumn)]
 TYPES = [c.type for c in fields(AutoEvalColumn)]
 
+
 def request_model(model_text, chbcoco2017):
-
     # Determine the selected checkboxes
     dataset_selection = []
     if chbcoco2017:
@@ -75,33 +95,37 @@ def request_model(model_text, chbcoco2017):
 
     if len(dataset_selection) == 0:
         return styled_error("You need to select at least one dataset")
-
-    # Check if model exists on the hub
+
+    # Check if model exists on the hub
     base_model_on_hub, error_msg = is_model_on_hub(model_text)
     if not base_model_on_hub:
         return styled_error(f"Base model '{model_text}' {error_msg}")
-
+
     # Check if model is already evaluated
-    model_text = model_text.replace(" ","")
+    model_text = model_text.replace(" ", "")
    if model_text.lower() in lst_evaluated_models:
-        return styled_error(f"Results of the model '{model_text}' are now ready and available.")
-
+        return styled_error(
+            f"Results of the model '{model_text}' are now ready and available."
+        )
+
     # Construct the output dictionary
     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
-    required_datasets = ', '.join(dataset_selection)
+    required_datasets = ", ".join(dataset_selection)
     eval_entry = {
         "date": current_time,
         "model": model_text,
-        "datasets_selected": required_datasets
+        "datasets_selected": required_datasets,
     }
-
-    # Prepare file path
+
+    # Prepare file path
     DIR_OUTPUT_REQUESTS.mkdir(parents=True, exist_ok=True)
-
-    fn_datasets = '@ '.join(dataset_selection)
-    filename = model_text.replace("/","@") + "@@" + fn_datasets
+
+    fn_datasets = "@ ".join(dataset_selection)
+    filename = model_text.replace("/", "@") + "@@" + fn_datasets
     if filename in requested_models:
-        return styled_error(f"A request for this model '{model_text}' and dataset(s) was already made.")
+        return styled_error(
+            f"A request for this model '{model_text}' and dataset(s) was already made."
+        )
     try:
         filename_ext = filename + ".txt"
         out_filepath = DIR_OUTPUT_REQUESTS / filename_ext
@@ -109,18 +133,21 @@ def request_model(model_text, chbcoco2017):
         # Write the results to a text file
         with open(out_filepath, "w") as f:
             f.write(json.dumps(eval_entry))
-
+
         upload_file(filename, out_filepath)
-
+
         # Include file in the list of uploaded files
         requested_models.append(filename)
-
+
         # Remove the local file
         out_filepath.unlink()
 
-        return styled_message("🤗 Your request has been submitted and will be evaluated soon!</p>")
-    except Exception as e:
-        return styled_error(f"Error submitting request!")
+        return styled_message(
+            "🤗 Your request has been submitted and will be evaluated soon!</p>"
+        )
+    except Exception:
+        return styled_error("Error submitting request!")
+
 
 with gr.Blocks() as demo:
     gr.HTML(BANNER, elem_id="banner")
@@ -131,39 +158,57 @@ with gr.Blocks() as demo:
             leaderboard_table = gr.components.Dataframe(
                 value=original_df,
                 datatype=TYPES,
-                max_rows=None,
                 elem_id="leaderboard-table",
                 interactive=False,
                 visible=True,
-                )
+            )
 
         with gr.TabItem("📈 Metrics", elem_id="od-benchmark-tab-table", id=1):
             gr.Markdown(METRICS_TAB_TEXT, elem_classes="markdown-text")
 
-        with gr.TabItem("✉️✨ Request a model here!", elem_id="od-benchmark-tab-table", id=2):
+        with gr.TabItem(
+            "✉️✨ Request a model here!", elem_id="od-benchmark-tab-table", id=2
+        ):
            with gr.Column():
-                gr.Markdown("# ✉️✨ Request results for a new model here!", elem_classes="markdown-text")
+                gr.Markdown(
+                    "# ✉️✨ Request results for a new model here!",
+                    elem_classes="markdown-text",
+                )
            with gr.Column():
                gr.Markdown("Select a dataset:", elem_classes="markdown-text")
            with gr.Column():
-                model_name_textbox = gr.Textbox(label="Model name (user_name/model_name)")
-                chb_coco2017 = gr.Checkbox(label="COCO validation 2017 dataset", visible=False, value=True, interactive=False)
+                model_name_textbox = gr.Textbox(
+                    label="Model name (user_name/model_name)"
+                )
+                chb_coco2017 = gr.Checkbox(
+                    label="COCO validation 2017 dataset",
+                    visible=False,
+                    value=True,
+                    interactive=False,
+                )
            with gr.Column():
                mdw_submission_result = gr.Markdown()
                btn_submitt = gr.Button(value="🚀 Request")
-                btn_submitt.click(request_model,
-                                  [model_name_textbox, chb_coco2017],
-                                  mdw_submission_result)
-
-    gr.Markdown(f"(*) FPS was measured using *{GPU_MODEL}* processing 1 image per batch. Refer to the 📈 \"Metrics\" tab for further details.", elem_classes="markdown-text")
+                btn_submitt.click(
+                    request_model,
+                    [model_name_textbox, chb_coco2017],
+                    mdw_submission_result,
+                )
+
+    gr.Markdown(
+        f'(*) FPS was measured using *{GPU_MODEL}* processing 1 image per batch. Refer to the 📈 "Metrics" tab for further details.',
+        elem_classes="markdown-text",
+    )
    gr.Markdown(f"Last updated on **{LAST_UPDATED}**", elem_classes="markdown-text")
 
    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            gr.Textbox(
-                value=CITATION_TEXT, lines=7,
+                value=CITATION_TEXT,
+                lines=7,
                label="Copy the BibTeX snippet to cite this source",
                elem_id="citation-button",
-            ).style(show_copy_button=True)
+                show_copy_button=True,
+            )
 
 demo.launch()