minhopark-neubla's picture
[MLP-1479] Init Neubla LLM Evaluation Board
996dccf
raw
history blame
345 Bytes
"""Configuration constants for the Neubla LLM evaluation board."""
import os

from huggingface_hub import HfApi

# Hugging Face access token; None when the env var is unset.
HF_TOKEN = os.getenv("HF_TOKEN")

# Hub repositories backing the evaluation board.
RESULTS_REPO = "NMOF-evaluation-board/results"
REPO_ID = "NMOF-evaluation-board/llm-evaluation-boards"

# Local cache root (defaults to the current directory) and the
# subdirectory where evaluation results are stored.
CACHE_PATH = os.getenv("HF_HOME", ".")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")

# Shared Hub API client (anonymous; no token is passed here).
API = HfApi()