# Code adapted from: https://huggingface.co/spaces/RaoFoundation/pretraining-leaderboard/blob/main/app.py
"""Gradio dashboard for the Subnet 9 (Bittensor pretraining) leaderboard.

Loads validator/model state via ``utils.load_state_vars`` and renders the
incentive leaderboard, benchmark table, evaluation stats, loss-over-time
plot, and validator stats. A background scheduler periodically restarts
the hosting Space so the data stays fresh.
"""
import os
import datetime

import gradio as gr
from dotenv import load_dotenv
from huggingface_hub import HfApi
from apscheduler.schedulers.background import BackgroundScheduler

import utils

# Custom font markup injected into the page head; currently empty.
FONT = """"""
TITLE = """

Subnet 9 Leaderboard

"""
HEADER = """

Subnet 9 is a Bittensor subnet that rewards miners for producing pretrained Foundation-Models on the Falcon Refined Web dataset. It acts like a continuous benchmark whereby miners are rewarded for attaining the best losses on randomly sampled pages of Falcon.
The models with the best head-to-head loss on the evaluation data receive a steady emission of TAO.

"""
EVALUATION_DETAILS = """
More stats on taostats."""
EVALUATION_HEADER = """

Shows the latest internal evaluation statistics as calculated by the Opentensor validator

"""

HF_REPO_ID = "macrocosm-os/pretraining-leaderboard"
# Bittensor's nominal block time, used to convert block counts to wall time.
SECONDS_PER_BLOCK = 12

load_dotenv()

HF_TOKEN = os.environ.get("HF_TOKEN", None)
API = HfApi(token=HF_TOKEN)


def get_next_update_div(current_block: int, next_update_block: int) -> str:
    """Return an HTML snippet estimating the time until the next reward update.

    Args:
        current_block: The chain's current block height.
        next_update_block: The block height at which the next update occurs.

    Returns:
        An HTML fragment stating the remaining blocks and approximate minutes.
    """
    now = datetime.datetime.now()
    blocks_to_go = next_update_block - current_block
    next_update_time = now + datetime.timedelta(
        seconds=blocks_to_go * SECONDS_PER_BLOCK
    )
    delta = next_update_time - now
    return f"""
Next reward update: {blocks_to_go} blocks (~{int(delta.total_seconds() // 60)} minutes)
"""


def get_last_updated_div() -> str:
    """Return an HTML snippet stamping the dashboard with the current UTC time."""
    # datetime.utcnow() is deprecated since Python 3.12; an aware "now" in UTC
    # produces the identical formatted timestamp.
    return f"""
Last Updated: {datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d %H:%M:%S")} (UTC)
"""


def restart_space():
    """Restart the hosting Hugging Face Space so it reloads fresh data."""
    API.restart_space(repo_id=HF_REPO_ID, token=HF_TOKEN)


def main():
    """Build and launch the leaderboard UI, then start the restart scheduler."""
    # NOTE(review): any retry-until-complete logic presumably lives inside
    # utils.load_state_vars — there is no retry loop here; confirm in utils.
    state_vars = utils.load_state_vars()
    model_data = state_vars["model_data"]
    vali_runs = state_vars["vali_runs"]
    scores = state_vars["scores"]
    validator_df = state_vars["validator_df"]
    benchmarks = state_vars.get("benchmarks", None)
    benchmark_timestamp = state_vars.get("benchmark_timestamp", None)

    demo = gr.Blocks(css=".typewriter {font-family: 'JMH Typewriter', sans-serif;}")
    with demo:
        gr.HTML(FONT)
        gr.HTML(TITLE)
        gr.HTML(HEADER)

        # TODO: Re-enable once ""SubtensorModule.BlocksSinceEpoch" not found" issue is resolved.
        # gr.HTML(value=get_next_update_div(current_block, next_epoch_block))

        # Top-10 models by incentive; label shows repo, short commit, and emission.
        gr.Label(
            value={
                f"{c.namespace}/{c.name} ({c.commit[0:8]}) · (τ{round(c.emission, 2):,})": c.incentive
                for c in model_data
                if c.incentive
            },
            num_top_classes=10,
        )

        if benchmarks is not None:
            with gr.Accordion("Top Model Benchmarks"):
                gr.components.Dataframe(benchmarks)
                gr.HTML("""
PPL computed using a stride of 512. See here for the full code.
""")
                gr.HTML(f"""
Last Updated: {benchmark_timestamp.strftime("%Y-%m-%d %H:%M:%S")} (UTC)
""")

        with gr.Accordion("Evaluation Stats"):
            gr.HTML(EVALUATION_HEADER)

            show_stale = gr.Checkbox(label="Show Stale", interactive=True)
            leaderboard_table = gr.components.Dataframe(
                value=utils.leaderboard_data(model_data, scores, show_stale.value),
                headers=["Name", "Win Rate", "Average Loss", "Weight", "UID", "Block"],
                datatype=["markdown", "number", "number", "number", "number", "number"],
                elem_id="leaderboard-table",
                interactive=False,
                visible=True,
            )
            gr.HTML(EVALUATION_DETAILS)
            # Re-filter the table when the "Show Stale" checkbox is toggled.
            show_stale.change(
                lambda stale: utils.leaderboard_data(model_data, scores, stale),
                inputs=[show_stale],
                outputs=leaderboard_table,
            )

            gr.LinePlot(
                utils.get_losses_over_time(vali_runs),
                x="timestamp",
                x_title="Date",
                y="best_loss",
                y_title="Average Loss",
                tooltip="best_loss",
                interactive=True,
                visible=True,
                width=1024,
                title="Best Average Loss Over Time",
            )

        with gr.Accordion("Validator Stats"):
            gr.components.Dataframe(
                utils.make_validator_dataframe(validator_df, model_data),
                interactive=False,
                visible=True,
            )
        gr.HTML(value=get_last_updated_div())

    scheduler = BackgroundScheduler()
    # 60 * 30 seconds = 30 minutes (the previous comment incorrectly said 15).
    scheduler.add_job(restart_space, "interval", seconds=60 * 30)  # restart every 30 minutes
    scheduler.start()

    demo.launch()


if __name__ == "__main__":
    main()