# Extraction artifact (Hugging Face Spaces status banner), not part of the module:
# "Spaces: Running / Running"
"""Module containing performance results for the Llama-Spark model.""" | |
results_llama_spark = { | |
"name": "Llama-Spark", | |
"modelType": "Llama 3.1 8B", | |
"configurations": [ | |
{ | |
"instanceType": "g5.2xlarge", | |
"quantization": "none", | |
"container": "TGI 2.2.0", | |
"status": "OK", | |
"tokensPerSecond": "29", | |
"notes": "4K/8K fails", | |
}, | |
{ | |
"instanceType": "g5.12xlarge", | |
"quantization": "none", | |
"container": "TGI 2.2.0", | |
"status": "OK", | |
"tokensPerSecond": "85", | |
"notes": '"MAX_INPUT_TOKENS": "16384", "MAX_TOTAL_TOKENS": "32768",', | |
}, | |
{ | |
"instanceType": "g5.48xlarge", | |
"quantization": "none", | |
"container": "TGI 2.2.0", | |
"status": "OK", | |
"tokensPerSecond": "105", | |
"notes": '"MAX_INPUT_TOKENS": "20480", "MAX_TOTAL_TOKENS": "40960"\n\n32K/64K fails', | |
}, | |
{ | |
"instanceType": "g6.12xlarge", | |
"quantization": "none", | |
"container": "TGI 2.2.0", | |
"status": "OK", | |
"tokensPerSecond": "51", | |
"notes": "same as g5?", | |
}, | |
{ | |
"instanceType": "g6.48xlarge", | |
"quantization": "none", | |
"container": "TGI 2.2.0", | |
"status": "OK", | |
"tokensPerSecond": "81", | |
"notes": "same as g5?", | |
}, | |
{ | |
"instanceType": "g6e.2xlarge", | |
"quantization": "none", | |
"status": "OK", | |
"configurations": [ | |
{"container": "TGI 2.2.0", "tokensPerSecond": "42.1"}, | |
{"container": "SGLang 0.2.13", "tokensPerSecond": "45"}, | |
{"container": "vLLM 0.5.5", "tokensPerSecond": "43.4"}, | |
], | |
}, | |
{ | |
"instanceType": "g6e.12xlarge", | |
"quantization": "none", | |
"status": "OK", | |
"configurations": [ | |
{"container": "TGI 2.2.0", "tokensPerSecond": "112"}, | |
{"container": "SGLang 0.2.13", "tokensPerSecond": "123"}, | |
{"container": "vLLM 0.5.5", "tokensPerSecond": "106"}, | |
], | |
}, | |
{ | |
"instanceType": "p4d.24xlarge", | |
"quantization": "none", | |
"container": "TGI 2.2.0", | |
"status": "OK", | |
"tokensPerSecond": "145", | |
"notes": '"MAX_INPUT_TOKENS": "40960", "MAX_TOTAL_TOKENS": "81920"\n\n64K/128K fails (even with 4-bit)', | |
}, | |
{ | |
"instanceType": "inf2.*", | |
"container": "TGI 2.2.0", | |
"status": "not supported", | |
"tokensPerSecond": "-", | |
"notes": "Llama-3.1: TGI OK, Neuron SDK OK, optimum-neuron KO", | |
}, | |
], | |
} | |