"""Push a trained ML-Agents run folder, together with a generated model card, to the Hugging Face Hub."""
import os
import argparse
import yaml
import json
from pathlib import Path
from huggingface_hub import HfApi
from huggingface_hub.repocard import metadata_save
from typing import Tuple
from mlagents_envs import logging_util
from mlagents_envs.logging_util import get_logger
# Module-level logger for progress messages; the ML-Agents log level is raised
# to INFO at import time so those messages are actually emitted.
logger = get_logger(__name__)
logging_util.set_log_level(logging_util.INFO)
def _generate_config(local_dir: Path, configfile_name: str) -> None:
    """
    Generate a config.json file from the YAML configuration file.

    Reads ``<local_dir>/<configfile_name>`` as YAML and writes the same data
    as JSON to ``<local_dir>/config.json``.

    :param local_dir: path of the local directory
    :param configfile_name: name of the yaml config file (by default configuration.yaml)
    """
    source_path = Path(local_dir) / configfile_name
    target_path = Path(local_dir) / "config.json"
    with source_path.open() as yaml_in:
        parsed_config = yaml.safe_load(yaml_in)
    with target_path.open("w") as json_out:
        json.dump(parsed_config, json_out)
def _generate_metadata(model_name: str, env_id: str) -> dict:
"""
Define the tags for the model card
:param model_name: name of the model
:param env_id: name of the environment
"""
env_tag = "ML-Agents-" + env_id
metadata = {}
metadata["library_name"] = "ml-agents"
metadata["tags"] = [ # type: ignore
env_id,
"deep-reinforcement-learning",
"reinforcement-learning",
env_tag,
]
return metadata
def _generate_model_card(
    local_dir: Path, configfile_name: str, repo_id: str
) -> Tuple[str, dict]:
    """
    Generate the model card text and its metadata.

    :param local_dir: local path of the directory
    :param configfile_name: name of the yaml config file (by default configuration.yaml)
    :param repo_id: id of the model repository from the Hugging Face Hub
    :return: (model card markdown, metadata dict)
    """
    # Step 1: Read the config.json previously produced by _generate_config
    config_path = os.path.join(local_dir, "config.json")
    with open(config_path) as f:
        config = json.load(f)
    behaviors = config["behaviors"]
    # The first behavior name is used as the environment id
    env_id = list(behaviors)[0]
    # The trainer_type (e.g. "ppo") doubles as the model name
    model_name = behaviors[env_id]["trainer_type"]
    # Step 2: Create the metadata
    metadata = _generate_metadata(model_name, env_id)
    # Step 3: Generate the model card
    model_card = f"""
# **{model_name}** Agent playing **{env_id}**
This is a trained model of a **{model_name}** agent playing **{env_id}**
using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).
## Usage (with ML-Agents)
The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/
We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:
- A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your
browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction
- A *longer tutorial* to understand how works ML-Agents:
https://huggingface.co/learn/deep-rl-course/unit5/introduction
### Resume the training
```bash
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```
### Watch your Agent play
You can watch your agent **playing directly in your browser**
1. If the environment is part of ML-Agents official environments, go to https://huggingface.co/unity
2. Step 1: Find your model_id: {repo_id}
3. Step 2: Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
"""
    return model_card, metadata
def _save_model_card(
    local_dir: Path, generated_model_card: str, metadata: dict
) -> None:
    """Write the model card to README.md and attach its metadata.

    :param local_dir: local directory path
    :param generated_model_card: model card generated by _generate_model_card() method
    :param metadata: metadata
    """
    readme_path = local_dir / "README.md"
    readme_path.write_text(generated_model_card, encoding="utf-8")
    # Embed the metadata as YAML front matter in the README
    metadata_save(readme_path, metadata)
def package_to_hub(
    run_id: str,
    path_of_run_id: Path,
    repo_id: str,
    commit_message: str,
    configfile_name: str,
) -> None:
    """
    Generate the model card and upload the run_id folder
    with all its files to the Hub.

    :param run_id: name of the run
    :param path_of_run_id: path of the run_id folder that contains the onnx model.
    :param repo_id: id of the model repository from the Hugging Face Hub
    :param commit_message: commit message
    :param configfile_name: name of the yaml config file (by default configuration.yaml)
    """
    logger.info(
        f"This function will create a model card and upload your {run_id} "
        f"into HuggingFace Hub. This is a work in progress: If you encounter a bug, "
        f"please open an issue"  # fixed garbled message: was "please send open an issue"
    )
    # Sanity check: repo_id must look like "<namespace>/<repo_name>"; an
    # id without "/" raises ValueError here, before any network call.
    _namespace, _repo_name = repo_id.split("/")
    # Step 1: Create the repo (no-op if it already exists)
    api = HfApi()
    repo_url = api.create_repo(
        repo_id=repo_id,
        exist_ok=True,
    )
    local_path = Path(path_of_run_id)
    # Step 2: Create a config.json from the YAML configuration
    _generate_config(local_path, configfile_name)
    # Step 3: Generate and save the model card
    generated_model_card, metadata = _generate_model_card(
        local_path, configfile_name, repo_id
    )
    _save_model_card(local_path, generated_model_card, metadata)
    logger.info(f"Pushing repo {run_id} to the Hugging Face Hub")
    # Step 4: Push everything to the Hub
    api.upload_folder(
        repo_id=repo_id, folder_path=local_path, commit_message=commit_message
    )
    logger.info(
        f"Your model is pushed to the hub. You can view your model here: {repo_url}"
    )
def main():
    """Parse command-line arguments and push the trained run to the Hub."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--run-id", type=str, help="Name of the run-id folder")
    arg_parser.add_argument(
        "--local-dir",
        type=str,
        default="./",
        help="Path of the run_id folder that contains the trained model",
    )
    arg_parser.add_argument(
        "--repo-id",
        type=str,
        help="Repo id of the model repository from the Hugging Face Hub",
    )
    arg_parser.add_argument(
        "--commit-message", type=str, default="Push to Hub", help="Commit message"
    )
    arg_parser.add_argument(
        "--configfile-name",
        type=str,
        default="configuration.yaml",
        help="Name of the configuration yaml file",
    )
    parsed = arg_parser.parse_args()
    # Push model to hub
    package_to_hub(
        parsed.run_id,
        parsed.local_dir,
        parsed.repo_id,
        parsed.commit_message,
        parsed.configfile_name,
    )
# For python debugger to directly run this script
if __name__ == "__main__":
main()
|