#!/usr/bin/env python
# -*- coding: utf-8 -*-
# flake8: noqa: E501
from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # task_key, metric_key, title
    task00 = Task("naive_judge", "score", "NaïveJudge")
    task01 = Task("human_eval_solidity_pass_1", "score", "HumanEval for Solidity (pass@1)")
    task02 = Task("human_eval_solidity_pass_3", "score", "HumanEval for Solidity (pass@3)")
    task03 = Task("rouge1", "score", "ROUGE-unigrams")
    task04 = Task("rouge2", "score", "ROUGE-bigrams")
    task05 = Task("rougeL", "score", "ROUGE-Longest Common Subsequence")
    task06 = Task("rougeLsum", "score", "ROUGE-Lsum")
    task07 = Task("bleu", "score", "BLEU")
    task08 = Task("brevity_penalty", "score", "Brevity Penalty")
# ---------------------------------------------------
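
# Each Task above pairs a key in the result files (benchmark) and the metric
# field to read (metric) with a leaderboard column title (col_name). As a
# minimal sketch of how a consumer might derive the display columns from the
# enum (TASK_COLUMN_TITLES is a hypothetical name, not used elsewhere):
TASK_COLUMN_TITLES = [task.value.col_name for task in Tasks]
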
# Your leaderboard name
TITLE = """<br><img src="file/images/solbench.svg" width="500" style="display: block; margin-left: auto; margin-right: auto;">
<h2 align="center" id="space-title">IQ Code | Solidity Leaderboard</h2>"""
# What does your leaderboard evaluate?
INTRODUCTION_TEXT = ""
# Which evaluations are you running? How can people reproduce what you have?
LLM_BENCHMARKS_TEXT = """
## How it works
## Reproducibility
To reproduce our results, here are the commands you can run:
"""
EVALUATION_REQUESTS_TEXT = """
## Some good practices before submitting a model
### 1) Make sure you can load your model and tokenizer using AutoClasses:
```python
from transformers import AutoConfig, AutoModel, AutoTokenizer

revision = "main"  # or the commit hash of the revision you want evaluated
config = AutoConfig.from_pretrained("your model name", revision=revision)
model = AutoModel.from_pretrained("your model name", revision=revision)
tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
```
If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
Note: make sure your model is public.
### 2) Fill out your model card
When we add extra information about models to the leaderboard, it is automatically taken from the model card.
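For example, that metadata can be read programmatically (a minimal sketch using huggingface_hub; "your model name" is a placeholder):
```python
from huggingface_hub import ModelCard

card = ModelCard.load("your model name")  # loads the card of a public model
print(card.data.license)  # metadata fields such as the license
```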
"""
EVALUATION_SCRIPT = ''
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = ''