---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 256.49 +/- 25.52
      name: mean_reward
      verified: false
---
|
|
|
# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)
|
The snippet below trains the agent, evaluates it, and uploads it to the Hugging Face Hub:
|
|
|
|
|
```python
|
# Train a PPO agent on LunarLander-v2, evaluate it, and upload it to the
# Hugging Face Hub.
#
# FIX: the original snippet ran `gym.make(...)` before `gymnasium` was
# aliased to `gym` (the alias import appeared lower in the script), which
# raises NameError. All imports are now consolidated at the top.
import gymnasium as gym

from huggingface_sb3 import load_from_hub, package_to_hub
from huggingface_hub import notebook_login  # Log in to our Hugging Face account so we can upload models to the Hub.

from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv

# Create the training environment: 16 parallel copies to speed up
# rollout collection.
env = make_vec_env('LunarLander-v2', n_envs=16)

# PPO hyperparameters used for this run.
model = PPO(
    policy='MlpPolicy',
    env=env,
    n_steps=1024,      # rollout length per environment before each update
    batch_size=64,
    n_epochs=4,        # optimization passes over each collected rollout
    gamma=0.999,       # high discount — LunarLander episodes are long
    gae_lambda=0.98,
    ent_coef=0.01,     # entropy bonus to encourage exploration
    verbose=1,
)

# Train it for 1,000,000 timesteps
model.learn(total_timesteps=1000000)

# Save the model
model_name = "ppo-LunarLander-v2"
model.save(model_name)

# Evaluate the trained agent on a fresh, Monitor-wrapped environment.
eval_env = Monitor(gym.make("LunarLander-v2"))
mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward}")

# Authenticate with the Hugging Face Hub (shows a login widget in a notebook).
notebook_login()
# NOTE: `!...` is IPython shell magic, not valid Python — in a notebook,
# also run the following cell so git stores the credential:
#   !git config --global credential.helper store

# Define the name of the environment
env_id = "LunarLander-v2"

# Define the model architecture we used
model_architecture = "PPO"

## Define a repo_id
## repo_id is the id of the model repository from the Hugging Face Hub
## (repo_id = {organization}/{repo_name}, for instance ThomasSimonini/ppo-LunarLander-v2)
## CHANGE WITH YOUR REPO ID
repo_id = "cryptoque/ppo-LunarLander-v2"  # Change with your repo id, you can't push with mine

## Define the commit message
commit_message = "Upload PPO LunarLander-v2 trained agent"

# Create the evaluation env with render_mode="rgb_array" so a replay video
# can be recorded during packaging.
eval_env = DummyVecEnv([lambda: gym.make(env_id, render_mode="rgb_array")])

# Package the trained agent and push it to the Hub.
package_to_hub(
    model=model,                            # Our trained model
    model_name=model_name,                  # The name of our trained model
    model_architecture=model_architecture,  # The model architecture we used: in our case PPO
    env_id=env_id,                          # Name of the environment
    eval_env=eval_env,                      # Evaluation environment
    repo_id=repo_id,                        # id of the model repository on the Hugging Face Hub
    commit_message=commit_message,
)
|
|
|
```
|
|