# Gradio app that builds the leaderboards for the Hugging Face Deep Reinforcement
# Learning Class: it pulls trained agents from the Hub and ranks them by
# mean_reward - std_reward.
import requests
import pandas as pd
from tqdm.auto import tqdm

import gradio as gr
from huggingface_hub import HfApi, hf_hub_download
from huggingface_hub.repocard import metadata_load


# Environments tracked by the leaderboard. Each name is also used as the
# model filter tag when querying the Hugging Face Hub.
RL_ENVS = ['LunarLander-v2', 'CarRacing-v0', 'MountainCar-v0',
           'BipedalWalker-v3', 'FrozenLake-v1', 'FrozenLake-v1-no_slippery',
           'Taxi-v3', 'Cliffwalker-v0']

# Custom CSS for the Gradio Blocks layout.
with open('app.css', 'r') as f:
    BLOCK_CSS = f.read()

# Model ids already fetched per environment, so that update_data() only has to
# process models published since the last refresh.
LOADED_MODEL_IDS = {rl_env: [] for rl_env in RL_ENVS}


# Based on Omar Sanseviero's work
# Make the model id a clickable link to its Hub page
def make_clickable_model(model_name):
    # Drop the "user/" prefix from the displayed name
    model_name_show = ' '.join(model_name.split('/')[1:])

    link = "https://huggingface.co/" + model_name
    return f'<a target="_blank" href="{link}">{model_name_show}</a>'

# Make user clickable link
def make_clickable_user(user_id):
    link = "https://huggingface.co/" + user_id
    return f'<a  target="_blank" href="{link}">{user_id}</a>'
    
def get_model_ids(rl_env):
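    """Return the ids of all Hub models tagged with the given environment name."""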
    api = HfApi()
    models = api.list_models(filter=rl_env)
    model_ids = [x.modelId for x in models]
    return model_ids
    
def get_metadata(model_id):
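    """Load the YAML metadata of a model's README.md, or None if the card is missing."""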
    try:
        readme_path = hf_hub_download(model_id, filename="README.md")
        return metadata_load(readme_path)
    except requests.exceptions.HTTPError:
        # 404 README.md not found
        return None
        
def parse_metrics_accuracy(meta):
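    """Return the first reported metric value from the model card metadata.

    Illustrative shape of the "model-index" entry (only the fields read below are assumed):
        model-index:
        - results:
          - metrics:
            - value: "250.50 +/- 20.10"
    """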
    if "model-index" not in meta:
        return None
    result = meta["model-index"][0]["results"]
    metrics = result[0]["metrics"]
    accuracy = metrics[0]["value"]
    return accuracy

# Parse "mean +/- std" strings. Models without a parseable result get a very low
# default score so they end up at the bottom of the leaderboard.
def parse_rewards(accuracy):
    default_reward = -1000
    default_std = -1000
    if accuracy is not None:
        parsed = accuracy.split(' +/- ')
        if len(parsed) > 1:
            mean_reward = float(parsed[0])
            std_reward = float(parsed[1])
        else:
            mean_reward = default_reward
            std_reward = default_std
    else:
        mean_reward = default_reward
        std_reward = default_std
    return mean_reward, std_reward

def get_data(rl_env):
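    """Build a leaderboard dataframe for every model on the Hub tagged with rl_env."""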
    global LOADED_MODEL_IDS 
    data = []
    model_ids = get_model_ids(rl_env)
    LOADED_MODEL_IDS[rl_env]+=model_ids

    for model_id in tqdm(model_ids):
        meta = get_metadata(model_id)
        if meta is None:
            continue
        user_id = model_id.split('/')[0]
        row = {}
        row["User"] = user_id
        row["Model"] = model_id
        accuracy = parse_metrics_accuracy(meta)
        mean_reward, std_reward = parse_rewards(accuracy)
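        # Lower-bound score used for ranking: mean_reward - std_reward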
        row["Results"] = mean_reward - std_reward
        row["Mean Reward"] = mean_reward
        row["Std Reward"] = std_reward
        data.append(row)
    return pd.DataFrame.from_records(data)



def update_data(rl_env):
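    """Like get_data(), but only processes models that are new since the last refresh."""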
    global LOADED_MODEL_IDS
    data = []
    model_ids = [x for x in get_model_ids(rl_env) if x not in LOADED_MODEL_IDS[rl_env]]
    LOADED_MODEL_IDS[rl_env]+=model_ids

    for model_id in tqdm(model_ids):
        meta = get_metadata(model_id)
        if meta is None:
            continue
        user_id = model_id.split('/')[0]
        row = {}
        row["User"] = user_id
        row["Model"] = model_id
        accuracy = parse_metrics_accuracy(meta)
        mean_reward, std_reward = parse_rewards(accuracy)
        row["Results"] = mean_reward - std_reward
        row["Mean Reward"] = mean_reward
        row["Std Reward"] = std_reward
        data.append(row)
    return pd.DataFrame.from_records(data)



def update_data_per_env(rl_env):
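    """Fetch newly published models, merge them into the cached leaderboard and re-rank it."""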
    global RL_DETAILS

    _,old_dataframe,_ = RL_DETAILS[rl_env]['data']
    new_dataframe = update_data(rl_env)

    new_dataframe = new_dataframe.fillna("")
    if not new_dataframe.empty:
        new_dataframe["User"] = new_dataframe["User"].apply(make_clickable_user)
        new_dataframe["Model"] = new_dataframe["Model"].apply(make_clickable_model)

    dataframe = pd.concat([old_dataframe,new_dataframe])

    if not dataframe.empty:
       
        dataframe = dataframe.sort_values(by=['Results'], ascending=False)
        if 'Ranking' not in dataframe.columns:
            dataframe.insert(0, 'Ranking', list(range(1, len(dataframe) + 1)))
        else:
            dataframe['Ranking'] = list(range(1, len(dataframe) + 1))
        table_html = dataframe.to_html(escape=False, index=False, justify='left')
        return table_html, dataframe, dataframe.empty
    else: 
        html = """<div style="color: green">
                <p> βŒ› Please wait. Results will be out soon... </p>
                </div>
               """
        return html,dataframe,dataframe.empty   


def get_data_per_env(rl_env):
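    """Build the initial leaderboard (HTML table, dataframe, is_empty flag) for one environment."""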
    dataframe = get_data(rl_env)
    dataframe = dataframe.fillna("")

    if not dataframe.empty:
        # turn the model ids into clickable links
        dataframe["User"] = dataframe["User"].apply(make_clickable_user)
        dataframe["Model"] = dataframe["Model"].apply(make_clickable_model)
        dataframe = dataframe.sort_values(by=['Results'], ascending=False)
        if 'Ranking' not in dataframe.columns:
            dataframe.insert(0, 'Ranking', list(range(1, len(dataframe) + 1)))
        else:
            dataframe['Ranking'] = list(range(1, len(dataframe) + 1))
        table_html = dataframe.to_html(escape=False, index=False, justify='left')
        return table_html, dataframe, dataframe.empty
    else: 
        html = """<div style="color: green">
                <p> βŒ› Please wait. Results will be out soon... </p>
                </div>
               """
        return html,dataframe,dataframe.empty   




def get_info_display(len_dataframe,env_name,name_leaderboard,is_empty):
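    """Build the HTML header shown above an environment's leaderboard table."""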
    if not is_empty:
        markdown = """
        <div class='infoPoint'>
        <h1> {name_leaderboard} </h1>
        <br>
        <p> This is a leaderboard of <b>{len_dataframe}</b> agents playing {env_name} πŸ‘©β€πŸš€. </p>
        <br>
        <p> Models are sorted by their lower-bound result: mean_reward - std_reward. </p>
        <br>
        <p> Click on a model's name to open its model card, which includes documentation. </p>
        <br>
        <p> Want to try your own model? Check out <a href="https://github.com/huggingface/deep-rl-class/blob/Unit1/unit1/README.md" target="_blank">Unit 1</a> of the Deep Reinforcement Learning Class.
        </p>
        </div>
        """.format(len_dataframe = len_dataframe,env_name = env_name,name_leaderboard = name_leaderboard)

    else:
        markdown = """
        <div class='infoPoint'>
        <h1> {name_leaderboard} </h1>
        <br>
        </div>                  
        """.format(name_leaderboard =  name_leaderboard)
    return markdown    

def reload_all_data():
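    """Refresh the cached leaderboard data for every environment (triggered on page load)."""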

    global RL_DETAILS,RL_ENVS

    for rl_env in RL_ENVS:
        RL_DETAILS[rl_env]['data'] = update_data_per_env(rl_env)

    html = """<div style="color: green">
                <p> βœ… Leaderboard updated! Click `Reload Leaderboard` to display the latest results.</p>
                </div>
               """
    return html


def reload_leaderboard(rl_env):
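    """Return the cached header markdown and table HTML for one environment's tab."""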
    global RL_DETAILS
 
    data_html, data_dataframe, is_empty = RL_DETAILS[rl_env]['data']

    markdown = get_info_display(len(data_dataframe), rl_env, RL_DETAILS[rl_env]['title'], is_empty)

    return markdown, data_html
            

           
# Titles and pre-fetched leaderboard data for every environment (built once at startup).
RL_DETAILS = {'CarRacing-v0': {'title': "The Car Racing 🏎️ Leaderboard πŸš€", 'data': get_data_per_env('CarRacing-v0')},
              'MountainCar-v0': {'title': "The Mountain Car ⛰️ πŸš— Leaderboard πŸš€", 'data': get_data_per_env('MountainCar-v0')},
              'LunarLander-v2': {'title': "The Lunar Lander πŸŒ• Leaderboard πŸš€", 'data': get_data_per_env('LunarLander-v2')},
              'BipedalWalker-v3': {'title': "The BipedalWalker Leaderboard πŸš€", 'data': get_data_per_env('BipedalWalker-v3')},
              'FrozenLake-v1': {'title': "The FrozenLake Leaderboard πŸš€", 'data': get_data_per_env('FrozenLake-v1')},
              'FrozenLake-v1-no_slippery': {'title': "The FrozenLake-v1-no_slippery Leaderboard πŸš€", 'data': get_data_per_env('FrozenLake-v1-no_slippery')},
              'Taxi-v3': {'title': "The Taxi-v3 πŸš– Leaderboard πŸš€", 'data': get_data_per_env('Taxi-v3')},
              'Cliffwalker-v0': {'title': "The Cliffwalker-v0 Leaderboard πŸš€", 'data': get_data_per_env('Cliffwalker-v0')},
              }



block = gr.Blocks(css=BLOCK_CSS)
with block:
    notification = gr.HTML("""<div style="color: green">
                <p> βŒ› Updating leaderboard... </p>
                </div>
               """)
    # Refresh every environment's data in the background once the page loads.
    block.load(reload_all_data, [], [notification])
    
    with gr.Tabs():
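        # One tab per environment; each tab keeps its environment name in a Gradio state variable.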
        for rl_env in RL_ENVS:
            with gr.TabItem(rl_env) as rl_tab:
                data_html, data_dataframe, is_empty = RL_DETAILS[rl_env]['data']
                markdown = get_info_display(len(data_dataframe), rl_env, RL_DETAILS[rl_env]['title'], is_empty)
                env_state = gr.Variable(default_value=rl_env)
                output_markdown = gr.HTML(markdown)
                reload = gr.Button('Reload Leaderboard')

                output_html = gr.HTML(data_html)

                reload.click(reload_leaderboard, inputs=[env_state], outputs=[output_markdown, output_html])
                rl_tab.select(reload_leaderboard, inputs=[env_state], outputs=[output_markdown, output_html])

block.launch()