# Page title shown at the top of the leaderboard (leading/trailing newlines
# are intentional: they pad the rendered heading).
TITLE = "\n\U0001F917 Open LLM-Perf Leaderboard \U0001F3CB\uFE0F\n"
# Markdown blurb rendered under the title. Fix: this was an f-string with no
# replacement fields — the useless `f` prefix made the literal fragile (any
# future `{`/`}` in the text would break formatting), so it is dropped; the
# string content is unchanged.
INTRODUCTION_TEXT = """
The 🤗 Open LLM-Perf Leaderboard 🏋️ aims to benchmark the performance (latency & throughput) of Large Language Models (LLMs) with different hardwares, backends and optimizations using [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark) and [Optimum](https://github.com/huggingface/optimum) flavors.
Anyone from the community can request a model or a hardware+backend+optimization configuration for automated benchmarking:
- Model requests should be made in the [🤗 Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) and will be added to the 🤗 Open LLM-Perf Leaderboard 🏋️ automatically once they're publicly available. That's mostly because we don't want to benchmark models that don't have an evaluation score yet.
- Hardware+Backend+Optimization requests should be made in the 🤗 Open LLM-Perf Leaderboard 🏋️ [community discussions](https://huggingface.co/spaces/optimum/llm-perf-leaderboard/discussions) for open discussion about their relevance and feasibility.
"""
# Caption for the single-GPU (1xA100) benchmark section; built from explicit
# lines so each bullet is visible as its own literal.
SINGLE_A100_TEXT = (
    "Single-GPU Benchmark (1xA100):\n"
    "- Singleton Batch (1)\n"
    "- Thousand Tokens (1000)\n"
)
# Label shown on the citation accordion/button in the UI.
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results."
# BibTeX snippet users copy to cite the leaderboard. Raw string so the `\url`
# macro's backslash survives. Fix: the original @misc entry was never closed
# before @software began (unbalanced braces), producing BibTeX that fails to
# parse when pasted; a closing brace is added after `howpublished`.
CITATION_BUTTON_TEXT = r"""@misc{open-llm-perf-leaderboard,
author = {Ilyas Moutawwakil},
title = {Open LLM-Perf Leaderboard},
year = {2023},
publisher = {Hugging Face},
howpublished = "\url{https://huggingface.co/spaces/optimum/llm-perf-leaderboard}",
}
@software{optimum-benchmark,
author = {Ilyas Moutawwakil},
publisher = {Hugging Face},
title = {Optimum-Benchmark: A framework for benchmarking the performance of Transformers models with different hardwares, backends and optimizations.},
}
"""