Spaces:
Running
Running
fix filters
Browse files
- src/app.py +13 -6
- src/components/filters.py +32 -21
- src/components/visualizations.py +60 -82
- src/services/firebase.py +9 -1
src/app.py
CHANGED
@@ -1,7 +1,7 @@
|
|
1 |
import asyncio
|
2 |
import streamlit as st
|
3 |
import pandas as pd
|
4 |
-
from typing import Optional, List, Set
|
5 |
|
6 |
from .components.filters import render_table_filters, render_plot_filters
|
7 |
from .components.visualizations import (
|
@@ -13,12 +13,19 @@ from .services.firebase import fetch_leaderboard_data
|
|
13 |
from .core.styles import CUSTOM_CSS
|
14 |
|
15 |
|
16 |
-
def
|
17 |
"""Get unique values for filters"""
|
18 |
models = sorted(df["Model ID"].unique().tolist())
|
19 |
platforms = sorted(df["Platform"].unique().tolist())
|
20 |
devices = sorted(df["Device"].unique().tolist())
|
21 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
22 |
|
23 |
|
24 |
async def main():
|
@@ -43,10 +50,10 @@ async def main():
|
|
43 |
render_header()
|
44 |
|
45 |
# Get unique values for filters
|
46 |
-
models, platforms, devices =
|
47 |
|
48 |
# Render filters
|
49 |
-
table_filters = render_table_filters(models, platforms, devices)
|
50 |
|
51 |
# Render the main leaderboard table
|
52 |
render_leaderboard_table(df, table_filters)
|
@@ -54,7 +61,7 @@ async def main():
|
|
54 |
# Render plot section
|
55 |
st.markdown("---")
|
56 |
st.title("📊 Performance Comparison")
|
57 |
-
plot_filters = render_plot_filters(models, platforms, devices)
|
58 |
render_performance_plots(df, plot_filters)
|
59 |
|
60 |
|
|
|
1 |
import asyncio
|
2 |
import streamlit as st
|
3 |
import pandas as pd
|
4 |
+
from typing import Optional, List, Set, Tuple
|
5 |
|
6 |
from .components.filters import render_table_filters, render_plot_filters
|
7 |
from .components.visualizations import (
|
|
|
13 |
from .core.styles import CUSTOM_CSS
|
14 |
|
15 |
|
16 |
+
def get_filter_values(
    df: pd.DataFrame,
) -> Tuple[
    List[str], List[str], List[str], List[str], List[str],
    Tuple[int, int], Tuple[int, int], Tuple[int, int], List[str], int,
]:
    """Collect the distinct values and numeric ranges that populate the filter widgets.

    Parameters
    ----------
    df:
        Leaderboard DataFrame. Must contain the columns read below
        ("Model ID", "Platform", "Device", "cache_type_v", "cache_type_k",
        "n_threads", "n_gpu_layers", "PP Config", "TG Config", "Version").

    Returns
    -------
    (models, platforms, devices, cache_type_v, cache_type_k,
     pp_range, tg_range, n_threads, versions, max_n_gpu_layers)
        Lists are sorted unique values; ranges are (min, max) tuples of
        plain Python ints so they can be passed straight to st.slider
        (numpy scalar types can confuse Streamlit widgets).
    """
    models = sorted(df["Model ID"].unique().tolist())
    platforms = sorted(df["Platform"].unique().tolist())
    devices = sorted(df["Device"].unique().tolist())
    cache_type_v = sorted(df["cache_type_v"].unique().tolist())
    cache_type_k = sorted(df["cache_type_k"].unique().tolist())
    # Cast pandas/numpy scalars to native ints before they reach slider widgets.
    n_threads = (int(df["n_threads"].min()), int(df["n_threads"].max()))
    max_n_gpu_layers = int(df["n_gpu_layers"].max())
    pp_range = (int(df["PP Config"].min()), int(df["PP Config"].max()))
    tg_range = (int(df["TG Config"].min()), int(df["TG Config"].max()))
    versions = sorted(df["Version"].unique().tolist())
    return models, platforms, devices, cache_type_v, cache_type_k, pp_range, tg_range, n_threads, versions, max_n_gpu_layers
|
29 |
|
30 |
|
31 |
async def main():
|
|
|
50 |
render_header()
|
51 |
|
52 |
# Get unique values for filters
|
53 |
+
models, platforms, devices, cache_type_v, cache_type_k, pp_range, tg_range, n_threads, versions, max_n_gpu_layers = get_filter_values(df)
|
54 |
|
55 |
# Render filters
|
56 |
+
table_filters = render_table_filters(models, platforms, devices, cache_type_v, cache_type_k, pp_range, tg_range, n_threads, versions, max_n_gpu_layers)
|
57 |
|
58 |
# Render the main leaderboard table
|
59 |
render_leaderboard_table(df, table_filters)
|
|
|
61 |
# Render plot section
|
62 |
st.markdown("---")
|
63 |
st.title("📊 Performance Comparison")
|
64 |
+
plot_filters = render_plot_filters(models, platforms, devices, cache_type_v, cache_type_k, pp_range, tg_range, n_threads, versions, max_n_gpu_layers)
|
65 |
render_performance_plots(df, plot_filters)
|
66 |
|
67 |
|
src/components/filters.py
CHANGED
@@ -67,6 +67,7 @@ def render_column_visibility() -> Set[str]:
|
|
67 |
],
|
68 |
"Advanced": [
|
69 |
"n_threads",
|
|
|
70 |
"flash_attn",
|
71 |
"cache_type_k",
|
72 |
"cache_type_v",
|
@@ -74,6 +75,9 @@ def render_column_visibility() -> Set[str]:
|
|
74 |
"n_batch",
|
75 |
"n_ubatch",
|
76 |
],
|
|
|
|
|
|
|
77 |
}
|
78 |
|
79 |
# Default visible columns
|
@@ -94,7 +98,7 @@ def render_column_visibility() -> Set[str]:
|
|
94 |
"TG Config",
|
95 |
}
|
96 |
|
97 |
-
with st.expander("
|
98 |
selected_columns = set()
|
99 |
cols = st.columns(len(column_categories))
|
100 |
for col, (category, columns) in zip(cols, column_categories.items()):
|
@@ -109,7 +113,9 @@ def render_column_visibility() -> Set[str]:
|
|
109 |
return selected_columns
|
110 |
|
111 |
|
112 |
-
def render_filters(models: List[str], platforms: List[str], devices: List[str]
|
|
|
|
|
113 |
"""Render all filters in a compact two-row layout"""
|
114 |
filters = {}
|
115 |
|
@@ -122,11 +128,11 @@ def render_filters(models: List[str], platforms: List[str], devices: List[str])
|
|
122 |
|
123 |
##with col1:
|
124 |
filters["model"] = st.selectbox(
|
125 |
-
"Model", options=["All"] + models, key="filter_model"
|
126 |
)
|
127 |
|
128 |
# Row 2 continued
|
129 |
-
col2, col3, col4, col5, col6, col7, col8, col9 = st.columns(
|
130 |
|
131 |
with col2:
|
132 |
filters["platform"] = st.selectbox(
|
@@ -144,21 +150,22 @@ def render_filters(models: List[str], platforms: List[str], devices: List[str])
|
|
144 |
)
|
145 |
|
146 |
with col5:
|
147 |
-
filters["
|
148 |
-
"Cache Type
|
149 |
)
|
150 |
|
151 |
with col6:
|
152 |
-
filters["
|
153 |
-
"Cache Type
|
154 |
)
|
155 |
|
|
|
156 |
with col7:
|
157 |
filters["pp_range"] = st.slider(
|
158 |
"PP Range",
|
159 |
-
min_value=0,
|
160 |
-
max_value=
|
161 |
-
value=
|
162 |
step=32,
|
163 |
key="filter_pp",
|
164 |
)
|
@@ -166,34 +173,38 @@ def render_filters(models: List[str], platforms: List[str], devices: List[str])
|
|
166 |
with col8:
|
167 |
filters["tg_range"] = st.slider(
|
168 |
"TG Range",
|
169 |
-
min_value=0,
|
170 |
-
max_value=
|
171 |
-
value=
|
172 |
step=32,
|
173 |
key="filter_tg",
|
174 |
)
|
175 |
|
176 |
with col9:
|
177 |
filters["n_threads"] = st.slider(
|
178 |
-
"Threads", min_value=
|
|
|
|
|
|
|
|
|
|
|
179 |
)
|
180 |
|
181 |
# Column visibility control as a small button/dropdown
|
182 |
-
|
183 |
-
# filters["visible_columns"] = render_column_visibility()
|
184 |
|
185 |
return filters
|
186 |
|
187 |
|
188 |
def render_table_filters(
|
189 |
-
models: List[str], platforms: List[str], devices: List[str]
|
190 |
) -> Dict:
|
191 |
"""Main entry point for table filters"""
|
192 |
-
return render_filters(models, platforms, devices)
|
193 |
|
194 |
|
195 |
def render_plot_filters(
|
196 |
-
models: List[str], platforms: List[str], devices: List[str]
|
197 |
) -> Dict:
|
198 |
"""Main entry point for plot filters"""
|
199 |
-
return render_filters(models, platforms, devices)
|
|
|
67 |
],
|
68 |
"Advanced": [
|
69 |
"n_threads",
|
70 |
+
"n_gpu_layers",
|
71 |
"flash_attn",
|
72 |
"cache_type_k",
|
73 |
"cache_type_v",
|
|
|
75 |
"n_batch",
|
76 |
"n_ubatch",
|
77 |
],
|
78 |
+
"App": [
|
79 |
+
"Version",
|
80 |
+
],
|
81 |
}
|
82 |
|
83 |
# Default visible columns
|
|
|
98 |
"TG Config",
|
99 |
}
|
100 |
|
101 |
+
with st.expander("Visible Columns", expanded=False):
|
102 |
selected_columns = set()
|
103 |
cols = st.columns(len(column_categories))
|
104 |
for col, (category, columns) in zip(cols, column_categories.items()):
|
|
|
113 |
return selected_columns
|
114 |
|
115 |
|
116 |
+
def render_filters(models: List[str], platforms: List[str], devices: List[str],
|
117 |
+
cache_type_v: List[str], cache_type_k: List[str], pp_range: Tuple[int, int],
|
118 |
+
tg_range: Tuple[int, int], n_threads: Tuple[int, int], versions: List[str], max_n_gpu_layers: int) -> Dict:
|
119 |
"""Render all filters in a compact two-row layout"""
|
120 |
filters = {}
|
121 |
|
|
|
128 |
|
129 |
##with col1:
|
130 |
filters["model"] = st.selectbox(
|
131 |
+
"Model", options=["All"] + models, key="filter_model", help="Filters"
|
132 |
)
|
133 |
|
134 |
# Row 2 continued
|
135 |
+
col2, col3, col4, col5, col6, col7, col8, col9, col10 = st.columns(9)
|
136 |
|
137 |
with col2:
|
138 |
filters["platform"] = st.selectbox(
|
|
|
150 |
)
|
151 |
|
152 |
with col5:
|
153 |
+
filters["cache_type_k"] = st.selectbox(
|
154 |
+
"Cache Type K", options=["All"] + cache_type_k, key="filter_cache_type_k"
|
155 |
)
|
156 |
|
157 |
with col6:
|
158 |
+
filters["cache_type_v"] = st.selectbox(
|
159 |
+
"Cache Type V", options=["All"] + cache_type_v, key="filter_cache_type_v"
|
160 |
)
|
161 |
|
162 |
+
|
163 |
with col7:
|
164 |
filters["pp_range"] = st.slider(
|
165 |
"PP Range",
|
166 |
+
min_value=pp_range[0],
|
167 |
+
max_value=pp_range[1],
|
168 |
+
value=pp_range,
|
169 |
step=32,
|
170 |
key="filter_pp",
|
171 |
)
|
|
|
173 |
with col8:
|
174 |
filters["tg_range"] = st.slider(
|
175 |
"TG Range",
|
176 |
+
min_value=tg_range[0],
|
177 |
+
max_value=tg_range[1],
|
178 |
+
value=tg_range,
|
179 |
step=32,
|
180 |
key="filter_tg",
|
181 |
)
|
182 |
|
183 |
with col9:
|
184 |
filters["n_threads"] = st.slider(
|
185 |
+
"Threads", min_value=n_threads[0], max_value=n_threads[1], value=n_threads, key="filter_threads"
|
186 |
+
)
|
187 |
+
|
188 |
+
with col10:
|
189 |
+
filters["Version"] = st.multiselect(
|
190 |
+
"Version", options=["All"] + versions, key="filter_version"
|
191 |
)
|
192 |
|
193 |
# Column visibility control as a small button/dropdown
|
194 |
+
filters["visible_columns"] = render_column_visibility()
|
|
|
195 |
|
196 |
return filters
|
197 |
|
198 |
|
199 |
def render_table_filters(
    models: List[str],
    platforms: List[str],
    devices: List[str],
    cache_type_v: List[str],
    cache_type_k: List[str],
    pp_range: Tuple[int, int],
    tg_range: Tuple[int, int],
    n_threads: Tuple[int, int],
    versions: List[str],
    max_n_gpu_layers: int,
) -> Dict:
    """Entry point for the leaderboard-table filter bar.

    Thin wrapper that forwards every argument unchanged to
    render_filters and returns the resulting filter dict.
    """
    return render_filters(
        models,
        platforms,
        devices,
        cache_type_v,
        cache_type_k,
        pp_range,
        tg_range,
        n_threads,
        versions,
        max_n_gpu_layers,
    )
|
204 |
|
205 |
|
206 |
def render_plot_filters(
    models: List[str],
    platforms: List[str],
    devices: List[str],
    cache_type_v: List[str],
    cache_type_k: List[str],
    pp_range: Tuple[int, int],
    tg_range: Tuple[int, int],
    n_threads: Tuple[int, int],
    versions: List[str],
    max_n_gpu_layers: int,
) -> Dict:
    """Entry point for the performance-plot filter bar.

    Thin wrapper that forwards every argument unchanged to
    render_filters and returns the resulting filter dict.
    """
    return render_filters(
        models,
        platforms,
        devices,
        cache_type_v,
        cache_type_k,
        pp_range,
        tg_range,
        n_threads,
        versions,
        max_n_gpu_layers,
    )
|
src/components/visualizations.py
CHANGED
@@ -42,56 +42,48 @@ def filter_dataframe(df: pd.DataFrame, filters: Dict) -> pd.DataFrame:
|
|
42 |
filtered_df = df.copy()
|
43 |
|
44 |
# Basic filters
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
59 |
pp_values = filtered_df["PP Config"]
|
60 |
-
|
61 |
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
& (tg_values <= tg_max)
|
67 |
-
]
|
68 |
|
69 |
-
# Advanced settings filters
|
70 |
-
advanced = filters["advanced_settings"]
|
71 |
-
if advanced["n_threads"]:
|
72 |
-
n_threads = filtered_df["initSettings"].apply(lambda x: x.get("n_threads"))
|
73 |
-
filtered_df = filtered_df[n_threads.isin(advanced["n_threads"])]
|
74 |
|
75 |
-
|
76 |
-
flash_attn = filtered_df["initSettings"].apply(lambda x: x.get("flash_attn"))
|
77 |
-
filtered_df = filtered_df[flash_attn.isin(advanced["flash_attn"])]
|
78 |
|
79 |
-
if
|
80 |
-
|
81 |
-
lambda x: x.get("cache_type_k")
|
82 |
-
)
|
83 |
-
cache_type_v = filtered_df["initSettings"].apply(
|
84 |
-
lambda x: x.get("cache_type_v")
|
85 |
-
)
|
86 |
filtered_df = filtered_df[
|
87 |
-
(
|
88 |
-
& (cache_type_v.isin(advanced["cache_type"]))
|
89 |
]
|
90 |
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
|
96 |
return filtered_df
|
97 |
|
@@ -118,21 +110,6 @@ def render_performance_plots(df: pd.DataFrame, filters: Dict):
|
|
118 |
## lambda x: int(x.split("tg: ")[1].split(")")[0])
|
119 |
## )
|
120 |
|
121 |
-
# Extract initSettings if not already present
|
122 |
-
if "n_threads" not in filtered_df.columns:
|
123 |
-
filtered_df["n_threads"] = filtered_df["initSettings"].apply(
|
124 |
-
lambda x: x.get("n_threads")
|
125 |
-
)
|
126 |
-
filtered_df["flash_attn"] = filtered_df["initSettings"].apply(
|
127 |
-
lambda x: x.get("flash_attn")
|
128 |
-
)
|
129 |
-
filtered_df["cache_type_k"] = filtered_df["initSettings"].apply(
|
130 |
-
lambda x: x.get("cache_type_k")
|
131 |
-
)
|
132 |
-
filtered_df["cache_type_v"] = filtered_df["initSettings"].apply(
|
133 |
-
lambda x: x.get("cache_type_v")
|
134 |
-
)
|
135 |
-
|
136 |
# Build aggregation dictionary based on available columns
|
137 |
agg_dict = {}
|
138 |
|
@@ -174,10 +151,10 @@ def render_performance_plots(df: pd.DataFrame, filters: Dict):
|
|
174 |
# Rename columns for display
|
175 |
column_mapping = {
|
176 |
"Prompt Processing": "PP Avg (t/s)",
|
177 |
-
#"Prompt Processing (std)": "PP Std (t/s)",
|
178 |
"Prompt Processing (count)": "Runs",
|
179 |
"Token Generation": "TG Avg (t/s)",
|
180 |
-
#"Token Generation (std)": "TG Std (t/s)",
|
181 |
"Memory Usage (%) (mean)": "Memory Usage (%)",
|
182 |
"Memory Usage (GB) (mean)": "Memory Usage (GB)",
|
183 |
"PP Config (first)": "PP Config",
|
@@ -248,33 +225,33 @@ def render_leaderboard_table(df: pd.DataFrame, filters: Dict):
|
|
248 |
for col, agg in {
|
249 |
"Prompt Processing": ["mean", "std"],
|
250 |
"Token Generation": ["mean", "std"],
|
251 |
-
#"Memory Usage (%)": "mean",
|
252 |
-
"Memory Usage (GB)": "mean",
|
253 |
-
"Total Memory (GB)": "first",
|
254 |
-
"CPU Cores": "first",
|
255 |
-
"Model Size": "first",
|
256 |
}.items()
|
257 |
if col not in grouping_cols
|
258 |
}
|
259 |
|
260 |
-
# Extract initSettings if needed
|
261 |
-
init_settings_cols = {
|
262 |
-
|
263 |
-
|
264 |
-
|
265 |
-
|
266 |
-
|
267 |
-
|
268 |
-
|
269 |
-
}
|
270 |
-
|
271 |
-
for col, setting in init_settings_cols.items():
|
272 |
-
|
273 |
-
|
274 |
-
|
275 |
-
|
276 |
-
|
277 |
-
|
278 |
|
279 |
# Group and aggregate
|
280 |
grouped_df = filtered_df.groupby(grouping_cols).agg(agg_dict).reset_index()
|
@@ -356,6 +333,7 @@ def render_leaderboard_table(df: pd.DataFrame, filters: Dict):
|
|
356 |
"n_context": "n_context",
|
357 |
"n_batch": "n_batch",
|
358 |
"n_ubatch": "n_ubatch",
|
|
|
359 |
}
|
360 |
|
361 |
# Convert visible columns and grouping columns to their mapped names
|
|
|
42 |
filtered_df = df.copy()
|
43 |
|
44 |
# Basic filters
|
45 |
+
if filters["model"] != "All":
|
46 |
+
filtered_df = filtered_df[filtered_df["Model ID"] == filters["model"]]
|
47 |
+
if filters["platform"] != "All":
|
48 |
+
filtered_df = filtered_df[filtered_df["Platform"] == filters["platform"]]
|
49 |
+
if filters["device"] != "All":
|
50 |
+
filtered_df = filtered_df[filtered_df["Device"] == filters["device"]]
|
51 |
+
|
52 |
+
# Flash Attention filter
|
53 |
+
if filters["flash_attn"] != "All":
|
54 |
+
filtered_df = filtered_df[filtered_df["flash_attn"] == filters["flash_attn"]]
|
55 |
+
|
56 |
+
# Cache Type filters
|
57 |
+
if filters["cache_type_k"] != "All":
|
58 |
+
filtered_df = filtered_df[filtered_df["cache_type_k"] == filters["cache_type_k"]]
|
59 |
+
|
60 |
+
if filters["cache_type_v"] != "All":
|
61 |
+
filtered_df = filtered_df[filtered_df["cache_type_v"] == filters["cache_type_v"]]
|
62 |
+
|
63 |
+
# Range filters
|
64 |
+
pp_min, pp_max = filters["pp_range"]
|
65 |
+
if pp_min is not None and pp_max is not None:
|
66 |
pp_values = filtered_df["PP Config"]
|
67 |
+
filtered_df = filtered_df[(pp_values >= pp_min) & (pp_values <= pp_max)]
|
68 |
|
69 |
+
tg_min, tg_max = filters["tg_range"]
|
70 |
+
if tg_min is not None and tg_max is not None:
|
71 |
+
tg_values = filtered_df["TG Config"]
|
72 |
+
filtered_df = filtered_df[(tg_values >= tg_min) & (tg_values <= tg_max)]
|
|
|
|
|
73 |
|
|
|
|
|
|
|
|
|
|
|
74 |
|
75 |
+
n_threads_min, n_threads_max = filters["n_threads"]
|
|
|
|
|
76 |
|
77 |
+
if n_threads_min is not None and n_threads_max is not None:
|
78 |
+
n_threads = filtered_df["n_threads"]
|
|
|
|
|
|
|
|
|
|
|
79 |
filtered_df = filtered_df[
|
80 |
+
(n_threads >= n_threads_min) & (n_threads <= n_threads_max)
|
|
|
81 |
]
|
82 |
|
83 |
+
# Version filter - handle multiple selections
|
84 |
+
if filters.get("version") != "All" and filters.get("version"):
|
85 |
+
filtered_df = filtered_df[filtered_df["Version"].isin(filters["version"])]
|
86 |
+
|
87 |
|
88 |
return filtered_df
|
89 |
|
|
|
110 |
## lambda x: int(x.split("tg: ")[1].split(")")[0])
|
111 |
## )
|
112 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
113 |
# Build aggregation dictionary based on available columns
|
114 |
agg_dict = {}
|
115 |
|
|
|
151 |
# Rename columns for display
|
152 |
column_mapping = {
|
153 |
"Prompt Processing": "PP Avg (t/s)",
|
154 |
+
# "Prompt Processing (std)": "PP Std (t/s)",
|
155 |
"Prompt Processing (count)": "Runs",
|
156 |
"Token Generation": "TG Avg (t/s)",
|
157 |
+
# "Token Generation (std)": "TG Std (t/s)",
|
158 |
"Memory Usage (%) (mean)": "Memory Usage (%)",
|
159 |
"Memory Usage (GB) (mean)": "Memory Usage (GB)",
|
160 |
"PP Config (first)": "PP Config",
|
|
|
225 |
for col, agg in {
|
226 |
"Prompt Processing": ["mean", "std"],
|
227 |
"Token Generation": ["mean", "std"],
|
228 |
+
# "Memory Usage (%)": "mean",
|
229 |
+
"Memory Usage (GB)": "mean", # For a given model, device, platform, mem should be the same.
|
230 |
+
"Total Memory (GB)": "first", # For a given model, device, platform, mem should be the same.
|
231 |
+
"CPU Cores": "first", # For a given model, device, platform, cpu cores should be the same.
|
232 |
+
"Model Size": "first", # model size should be the same for all.
|
233 |
}.items()
|
234 |
if col not in grouping_cols
|
235 |
}
|
236 |
|
237 |
+
# # Extract initSettings if needed
|
238 |
+
# init_settings_cols = {
|
239 |
+
# "n_threads": "n_threads",
|
240 |
+
# "flash_attn": "flash_attn",
|
241 |
+
# "cache_type_k": "cache_type_k",
|
242 |
+
# "cache_type_v": "cache_type_v",
|
243 |
+
# "n_context": "n_context",
|
244 |
+
# "n_batch": "n_batch",
|
245 |
+
# "n_ubatch": "n_ubatch",
|
246 |
+
# }
|
247 |
+
|
248 |
+
# for col, setting in init_settings_cols.items():
|
249 |
+
# if col not in filtered_df.columns:
|
250 |
+
# filtered_df[col] = filtered_df["initSettings"].apply(
|
251 |
+
# lambda x: x.get(setting)
|
252 |
+
# )
|
253 |
+
# if col not in grouping_cols:
|
254 |
+
# agg_dict[col] = "first"
|
255 |
|
256 |
# Group and aggregate
|
257 |
grouped_df = filtered_df.groupby(grouping_cols).agg(agg_dict).reset_index()
|
|
|
333 |
"n_context": "n_context",
|
334 |
"n_batch": "n_batch",
|
335 |
"n_ubatch": "n_ubatch",
|
336 |
+
"Version": "Version",
|
337 |
}
|
338 |
|
339 |
# Convert visible columns and grouping columns to their mapped names
|
src/services/firebase.py
CHANGED
@@ -98,7 +98,15 @@ def format_leaderboard_data(submissions: List[dict]) -> pd.DataFrame:
|
|
98 |
"Timestamp": benchmark_result.get("timestamp", "Unknown"),
|
99 |
"Model ID": benchmark_result.get("modelId", "Unknown"),
|
100 |
"OID": benchmark_result.get("oid"),
|
101 |
-
"initSettings": benchmark_result.get("initSettings"),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
102 |
"Version": device_info.get("version", "Unknown"),
|
103 |
}
|
104 |
)
|
|
|
98 |
"Timestamp": benchmark_result.get("timestamp", "Unknown"),
|
99 |
"Model ID": benchmark_result.get("modelId", "Unknown"),
|
100 |
"OID": benchmark_result.get("oid"),
|
101 |
+
#"initSettings": benchmark_result.get("initSettings"),
|
102 |
+
"n_threads": benchmark_result.get("initSettings", {}).get("n_threads", -1),
|
103 |
+
"n_gpu_layers": benchmark_result.get("initSettings", {}).get("n_gpu_layers", 0),
|
104 |
+
"flash_attn": benchmark_result.get("initSettings", {}).get("flash_attn", False),
|
105 |
+
"cache_type_k": benchmark_result.get("initSettings", {}).get("cache_type_k", "f16"),
|
106 |
+
"cache_type_v": benchmark_result.get("initSettings", {}).get("cache_type_v", "f16"),
|
107 |
+
"n_context": benchmark_result.get("initSettings", {}).get("n_context", -1),
|
108 |
+
"n_batch": benchmark_result.get("initSettings", {}).get("n_batch", -1),
|
109 |
+
"n_ubatch": benchmark_result.get("initSettings", {}).get("n_ubatch", -1),
|
110 |
"Version": device_info.get("version", "Unknown"),
|
111 |
}
|
112 |
)
|