import os
from huggingface_hub import HfApi
# Org/username that owns the Hub repos read from and written to below
OWNER = "meg"
# Hugging Face read/write access token
TOKEN = os.environ.get("HF_TOKEN")
API = HfApi(token=TOKEN)
# Key for Perspective API
PERSPECTIVE_API_KEY = os.environ.get("PERSPECTIVE_API_KEY")
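
# Example usage sketch (not part of this config): scoring text with the key
# above via Google's documented commentanalyzer client. The helper name is
# hypothetical; requires the google-api-python-client package.
def perspective_toxicity(text: str) -> float:
    """Return Perspective's TOXICITY summary score for `text`."""
    from googleapiclient import discovery

    client = discovery.build(
        "commentanalyzer",
        "v1alpha1",
        developerKey=PERSPECTIVE_API_KEY,
        discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
        static_discovery=False,
    )
    body = {
        "comment": {"text": text},
        "requestedAttributes": {"TOXICITY": {}},
    }
    response = client.comments().analyze(body=body).execute()
    return response["attributeScores"]["TOXICITY"]["summaryScore"]["value"]
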
# Number of lines to read from the eval file, or None for all.
EVAL_CUTOFF = 120  # For testing only; must be None for actual evaluations.
# How often to attempt an eval run, in seconds.
REFRESH_RATE = 5 * 60  # 5 minutes
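
# Example usage sketch (not part of this config): one way to honor
# REFRESH_RATE, assuming an APScheduler BackgroundScheduler drives the eval
# loop. The `run_eval` entry point is hypothetical.
def schedule_evals(run_eval) -> None:
    """Run `run_eval` every REFRESH_RATE seconds in the background."""
    from apscheduler.schedulers.background import BackgroundScheduler

    scheduler = BackgroundScheduler()
    scheduler.add_job(run_eval, "interval", seconds=REFRESH_RATE)
    scheduler.start()
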
# How many lines to display in the log visualizer
NUM_LINES_VISUALIZE = 300
# Where results are displayed
REPO_ID = f"{OWNER}/leaderboard"
# Dataset repo where evaluation requests are created
REQUESTS_REPO = f"{OWNER}/requests"
# Dataset repo where evaluation results are written
RESULTS_REPO = f"{OWNER}/results"
# If you set up a cache, point HF_HOME at it; defaults to the current directory.
CACHE_PATH = os.getenv("HF_HOME", ".")
# Local caches
EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-requests")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
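
# Example usage sketch (not part of this config): a minimal sketch, assuming
# the eval runner mirrors the request/result dataset repos into the local
# caches above with huggingface_hub.snapshot_download before each pass.
def sync_local_caches() -> None:
    """Pull the latest requests and results into the local cache dirs."""
    from huggingface_hub import snapshot_download

    snapshot_download(
        repo_id=REQUESTS_REPO,
        repo_type="dataset",
        local_dir=EVAL_REQUESTS_PATH,
        token=TOKEN,
    )
    snapshot_download(
        repo_id=RESULTS_REPO,
        repo_type="dataset",
        local_dir=EVAL_RESULTS_PATH,
        token=TOKEN,
    )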