import os
import random
from datetime import datetime

import pandas as pd
from huggingface_hub import HfApi, Repository

DATASET_REPO_URL = "https://huggingface.co/datasets/CarlCochet/BotFightData"
ELO_FILENAME = "soccer_elo.csv"
ELO_DIR = "soccer_elo"
HF_TOKEN = os.environ.get("HF_TOKEN")

# Clone the dataset repo locally so the updated rankings can be pushed back.
repo = Repository(
    local_dir=ELO_DIR, clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
)


class Model:
    """
    Class containing the info of a model.

    :param author: Author of the model
    :param name: Name of the model
    :param elo: Elo rating of the model
    :param games_played: Number of games played by the model (useful if we implement sigma uncertainty)
    """
    def __init__(self, author, name, elo=1200, games_played=0):
        self.author = author
        self.name = name
        self.elo = elo
        self.games_played = games_played


class Matchmaking:
    """
    Class managing the matchmaking between the models.

    :param models: List of models
    :param queue: Temporary list of models used for the matching process
    :param k: K-factor (development coefficient) scaling how much one match moves a rating
    :param max_diff: Maximum difference considered between two models' Elo ratings
    :param matches: Dictionary containing the match history (to later upload as CSV)
    """
    def __init__(self, models):
        self.models = models
        self.queue = self.models.copy()
        self.k = 20
        self.max_diff = 500  # declared but not yet enforced during matching
        self.matches = {
            "model1": [],
            "model2": [],
            "result": [],
            "datetime": [],
            "env": [],
        }

    def run(self):
        """
        Run the matchmaking process.
        Add models to the queue, shuffle it, and match the models one by one to models with close ratings.
        Compute the new Elo for each model after each match and add the match to the match history.
        """
        self.queue = self.models.copy()
        random.shuffle(self.queue)
        while len(self.queue) > 1:
            model1 = self.queue.pop(0)
            model2 = self.queue.pop(self.find_n_closest_indexes(model1, 10))
            result = match(model1, model2)
            self.compute_elo(model1, model2, result)
            self.matches["model1"].append(model1.name)
            self.matches["model2"].append(model2.name)
            self.matches["result"].append(result)
            self.matches["datetime"].append(datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"))
            # NOTE: self.matches["env"] is never filled here, so it must be
            # populated (or dropped) before self.matches can become a DataFrame.

    def compute_elo(self, model1, model2, result):
        """ Compute the new Elo for each model based on a match result. """
        delta = model1.elo - model2.elo
        # Expected score for model1. Note the 500-point scale instead of the
        # conventional 400 of standard Elo, which slightly flattens the curve.
        win_probability = 1 / (1 + 10 ** (-delta / 500))
        model1.elo += self.k * (result - win_probability)
        model2.elo -= self.k * (result - win_probability)
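    # Worked example (values follow directly from this formula, with k == 20):
    # if model1.elo - model2.elo == 100, win_probability = 1 / (1 + 10 ** -0.2)
    # ≈ 0.613, so a win (result == 1) moves model1 up by 20 * (1 - 0.613) ≈ 7.7
    # points and model2 down by the same amount; updates stay zero-sum.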

    def find_n_closest_indexes(self, model, n) -> int:
        """
        Get the index of a model with a fairly close rating. If no candidate is
        found, return the index of the last model in the queue.
        We don't always pick the closest rating to add variety to the matchups.

        :param model: Model to compare
        :param n: Number of close models from which to pick a candidate
        :return: index of the chosen candidate in the queue
        """
        # Rank every other queued model by rating distance and keep the n closest.
        candidates = sorted(
            (abs(m.elo - model.elo), i)
            for i, m in enumerate(self.queue)
            if m.name != model.name
        )[:n]
        if not candidates:
            return len(self.queue) - 1
        return random.choice(candidates)[1]
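    # Illustrative example (hypothetical ratings): with queue Elos
    # [1200, 1210, 1390, 900] and model.elo == 1205, the sorted (diff, index)
    # pairs are [(5, 0), (5, 1), (185, 2), (305, 3)]; with n == 2 the pool is
    # the two 5-point neighbours, and one of them is chosen at random.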

    def to_csv(self):
        """ Save the current Elo rankings as a CSV file and push them to the Hub. """
        data_dict = {"rank": [], "author": [], "model": [], "elo": [], "games_played": []}
        sorted_models = sorted(self.models, key=lambda x: x.elo, reverse=True)
        for i, model in enumerate(sorted_models):
            data_dict["rank"].append(i + 1)
            data_dict["author"].append(model.author)
            data_dict["model"].append(model.name)
            data_dict["elo"].append(model.elo)
            data_dict["games_played"].append(model.games_played)
        df = pd.DataFrame(data_dict)
        print(df.head())
        df.to_csv(os.path.join(ELO_DIR, ELO_FILENAME), index=False)
        repo.push_to_hub(commit_message="Update ELO")
        # df_matches = pd.DataFrame(self.matches)
        # date = datetime.now()
        # df_matches.to_csv(f"match_history/{date.strftime('%Y-%m-%d_%H-%M-%S_%f')}.csv", index=False)
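    # The pushed CSV then has one row per model, best rating first
    # (values below are purely illustrative):
    #
    # rank,author,model,elo,games_played
    # 1,alice,soccer-ppo,1232.4,12
    # 2,bob,soccer-sac,1187.9,11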


def match(model1, model2) -> float:
    """
    !!! Current code is placeholder !!!
    TODO: Launch a Unity process with the 2 models and get the result of the match

    :param model1: First Model object
    :param model2: Second Model object
    :return: match result (0: model1 lost, 0.5: draw, 1: model1 won)
    """
    result = random.randint(0, 2) / 2  # placeholder: uniform pick of 0, 0.5 or 1
    model1.games_played += 1
    model2.games_played += 1
    return result
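

# A minimal sketch of what the real match() could become, assuming the Unity
# build is shipped as a headless CLI binary. The binary path and flags below
# ("./soccer_env.x86_64", --agent1/--agent2) are hypothetical placeholders,
# not an interface the project actually defines.
def match_unity_sketch(model1, model2) -> float:
    import subprocess

    completed = subprocess.run(
        ["./soccer_env.x86_64", "--agent1", model1.name, "--agent2", model2.name],
        capture_output=True, text=True,
    )
    # Assumes the process prints the result (0, 0.5 or 1) as its last stdout line.
    return float(completed.stdout.strip().splitlines()[-1])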


def get_models_list() -> list:
    """
    !!! Current code is placeholder !!!
    TODO: Create a list of Model objects from the models found on the hub

    :return: list of Model objects
    """
    models = []
    models_names = []
    # Load the existing rankings straight from the dataset repo on the Hub.
    # (Plain string formatting instead of os.path.join, which would build an
    # invalid URL on Windows.)
    data = pd.read_csv(f"{DATASET_REPO_URL}/resolve/main/{ELO_FILENAME}")
    # models_on_hub = api.list_models(filter=["reinforcement-learning", env, "stable-baselines3"])
    models_on_hub = []
    for i, row in data.iterrows():
        models.append(Model(row["author"], row["model"], row["elo"], row["games_played"]))
        models_names.append(row["model"])
    # Any hub model not already ranked joins the pool with the default Elo.
    for model in models_on_hub:
        if model.modelId not in models_names:
            models.append(Model(model.author, model.modelId))
    return models


def init_matchmaking():
    models = get_models_list()
    matchmaking = Matchmaking(models)
    matchmaking.run()
    matchmaking.to_csv()
    print("Matchmaking done ---", datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"))


if __name__ == "__main__":
    print("It's running!")
    api = HfApi()
    init_matchmaking()