#!/usr/bin/env python
# -*- coding: utf-8 -*-
# flake8: noqa E501

from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # task_key, metric_key, title
    task00 = Task("naive_judge", "score", "NaïveJudge")
    task01 = Task("human_eval_solidity_pass@1", "score", "HumanEval for Solidity (pass@1)")
    task02 = Task("human_eval_solidity_pass@3", "score", "HumanEval for Solidity (pass@3)")
    task03 = Task("rouge1", "score", "ROUGE-unigrams")
    task04 = Task("rouge2", "score", "ROUGE-bigrams")
    task05 = Task("rougeL", "score", "ROUGE-Longest Common Subsequence")
    task06 = Task("rougeLsum", "score", "ROUGE-Lsum")
    task07 = Task("bleu", "score", "BLEU")
    task08 = Task("brevity_penalty", "score", "Brevity Penalty")
# ---------------------------------------------------


# Your leaderboard name
TITLE = """

Solidity Leaderboard

""" # What does your leaderboard evaluate? INTRODUCTION_TEXT = "" # Which evaluations are you running? how can people reproduce what you have? LLM_BENCHMARKS_TEXT = """ ## How it works ## Reproducibility To reproduce our results, here is the commands you can run: """ EVALUATION_REQUESTS_TEXT = """ ## Some good practices before submitting a model ### 1) Make sure you can load your model and tokenizer using AutoClasses: ```python from transformers import AutoConfig, AutoModel, AutoTokenizer config = AutoConfig.from_pretrained("your model name", revision=revision) model = AutoModel.from_pretrained("your model name", revision=revision) tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision) ``` If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded. Note: make sure your model is public. ### 2) Fill up your model card When we add extra information about models to the leaderboard, it will be automatically taken from the model card """ EVALUATION_SCRIPT = '' CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results" CITATION_BUTTON_TEXT = ''