eduardo-alvarez committed
Commit: b050867
Parent(s): 606a50f

adding 2b to filters to account for gemma based models
app.py CHANGED

@@ -154,10 +154,10 @@ with demo:
                                                    value=["No Affiliation","Intel Innovator","Student Ambassador","Intel Liftoff", "Intel Engineering", "Other"])
 
         with gr.Column():
-            filter_size = gr.CheckboxGroup(choices=[1,3,5,7,13,35,60,70,100],
+            filter_size = gr.CheckboxGroup(choices=[1,2,3,5,7,13,35,60,70,100],
                                            label="Model Sizes (Billion of Parameters)",
                                            elem_id="parameter_size",
-                                           value=[1,3,5,7,13,35,60,70,100])
+                                           value=[1,2,3,5,7,13,35,60,70,100])
             filter_precision = gr.CheckboxGroup(choices=["fp32","fp16","bf16","int8","fp8", "int4"],
                                                 label="Model Precision",
                                                 elem_id="precision",
@@ -178,7 +178,7 @@ with demo:
     initial_filtered_df = update_df(["Gaudi","Xeon","GPU Max","Arc GPU","Core Ultra"],
                                     ["Intel Developer Cloud","AWS","Azure","Google Cloud Platform","Local"],
                                     ["No Affiliation","Intel Innovator","Student Ambassador","Intel Liftoff", "Intel Engineering", "Other"],
-                                    [1,3,5,7,13,35,60,70,100],
+                                    [1,2,3,5,7,13,35,60,70,100],
                                     ["fp32","fp16","bf16","int8","fp8", "int4"],
                                     ["pretrained","fine-tuned","chat-models","merges/moerges"])
 
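
For context, the commit adds 2 (billion parameters) to the size filter choices and defaults in three places so Gemma-sized models are not filtered out. Below is a minimal, self-contained sketch of how a size CheckboxGroup like this one can drive a table in Gradio; the sample data, the "Params (B)" column name, and the filter_df helper are hypothetical stand-ins, not the app's actual update_df.

import gradio as gr
import pandas as pd

# Hypothetical sample data; the 2B row covers Gemma-based models.
MODELS = pd.DataFrame(
    {"Model": ["gemma-2b", "llama-2-7b", "mixtral-8x7b"], "Params (B)": [2, 7, 13]}
)

SIZES = [1, 2, 3, 5, 7, 13, 35, 60, 70, 100]

def filter_df(sizes):
    # Keep only rows whose parameter count is among the checked sizes.
    return MODELS[MODELS["Params (B)"].isin(sizes)]

with gr.Blocks() as demo:
    filter_size = gr.CheckboxGroup(choices=SIZES, value=SIZES,
                                   label="Model Sizes (Billion of Parameters)")
    table = gr.Dataframe(value=MODELS)
    # Re-filter the table whenever the checked sizes change.
    filter_size.change(filter_df, inputs=filter_size, outputs=table)

if __name__ == "__main__":
    demo.launch()

Adding 2 to both choices and value keeps the new size visible and checked by default, which mirrors what the diff does for filter_size and for the initial update_df call.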