Julien Simon committed on
Commit
9f46468
1 Parent(s): 0c0f086

Fix N/A values

Files changed (2)
  1. app.py +10 -5
  2. results_llama_spark.py +10 -0
app.py CHANGED
@@ -9,7 +9,7 @@ import re
 import gradio as gr
 import pandas as pd
 
-from results import results
+from results import instance_type_mappings, results
 
 logging.basicConfig(level=logging.DEBUG)
 
@@ -107,14 +107,19 @@ def display_results(model_name):
         cloud = config.get("cloud", "N/A")
         instance_type = config.get("instanceType", "N/A")
 
+        # Fetch GPU and GPU RAM information from instance_type_mappings
+        instance_info = instance_type_mappings.get(instance_type, {})
+        gpu = instance_info.get("gpu", "N/A")
+        gpu_ram = instance_info.get("gpuRAM", "N/A")
+
         if "configurations" in config:
             for nested_config in config["configurations"]:
                 data.append(
                     {
                         "Cloud": cloud,
                         "Instance Type": instance_type,
-                        "GPU": config.get("gpu", "N/A"),
-                        "GPU RAM": config.get("gpuRAM", "N/A"),
+                        "GPU": gpu,
+                        "GPU RAM": gpu_ram,
                         "Status": nested_config.get("status", "N/A"),
                         "Quantization": nested_config.get(
                             "quantization", "N/A"
@@ -134,8 +139,8 @@ def display_results(model_name):
                 {
                     "Cloud": cloud,
                     "Instance Type": instance_type,
-                    "GPU": config.get("gpu", "N/A"),
-                    "GPU RAM": config.get("gpuRAM", "N/A"),
+                    "GPU": gpu,
+                    "GPU RAM": gpu_ram,
                     "Status": config.get("status", "N/A"),
                     "Quantization": config.get("quantization", "N/A"),
                     "Container": config.get(
results_llama_spark.py CHANGED
@@ -54,6 +54,16 @@ results_llama_spark = {
             {"container": "vLLM 0.5.5", "tokensPerSecond": "43.4"},
         ],
     },
+    {
+        "instanceType": "g6e.12xlarge",
+        "quantization": "none",
+        "status": "OK",
+        "configurations": [
+            {"container": "TGI 2.2.0", "tokensPerSecond": "112"},
+            {"container": "SGLang 0.2.13", "tokensPerSecond": "123"},
+            {"container": "vLLM 0.5.5", "tokensPerSecond": "106"},
+        ],
+    },
     {
         "instanceType": "p4d.24xlarge",
         "quantization": "none",