Upload PPO LunarLander-v2 trained agent
- README.md +16 -40
- config.json +1 -1
- ppo-LunarLander-v2.zip +2 -2
- ppo-LunarLander-v2/data +18 -18
- ppo-LunarLander-v2/policy.optimizer.pth +1 -1
- ppo-LunarLander-v2/policy.pth +1 -1
- ppo-LunarLander-v2/system_info.txt +1 -1
- replay.mp4 +0 -0
- results.json +1 -1
README.md
CHANGED
@@ -1,11 +1,10 @@
 ---
+library_name: stable-baselines3
 tags:
 - LunarLander-v2
-- ppo
 - deep-reinforcement-learning
 - reinforcement-learning
--
-- deep-rl-course
+- stable-baselines3
 model-index:
 - name: PPO
   results:
@@ -17,45 +16,22 @@ model-index:
       type: LunarLander-v2
     metrics:
     - type: mean_reward
-      value:
+      value: 248.59 +/- 18.37
       name: mean_reward
       verified: false
 ---
 
-
+# **PPO** Agent playing **LunarLander-v2**
+This is a trained model of a **PPO** agent playing **LunarLander-v2**
+using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
 
-
-
-
-
-
-
-
-
-
-
-'wandb_entity': None
-'capture_video': False
-'env_id': 'LunarLander-v2'
-'total_timesteps': 1000000
-'learning_rate': 0.00025
-'num_envs': 4
-'num_steps': 1024
-'anneal_lr': True
-'gae': True
-'gamma': 0.99
-'gae_lambda': 0.95
-'num_minibatches': 4
-'update_epochs': 20
-'norm_adv': True
-'clip_coef': 0.2
-'clip_vloss': True
-'ent_coef': 0.01
-'vf_coef': 0.5
-'max_grad_norm': 0.5
-'target_kl': None
-'repo_id': 'r0in/ppo-lunarlander-v2'
-'batch_size': 4096
-'minibatch_size': 1024}
-```
-
+## Usage (with Stable-baselines3)
+TODO: Add your code
+
+
+```python
+from stable_baselines3 import ...
+from huggingface_sb3 import load_from_hub
+
+...
+```
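The usage section in the new card is still the template placeholder (`TODO: Add your code`). A minimal sketch of what that section typically contains follows, assuming the checkpoint is hosted at the repo id `r0in/ppo-LunarLander-v2` (inferred from the old card's `repo_id` entry) and that `stable-baselines3`, `huggingface_sb3`, and `gymnasium[box2d]` are installed; the filename matches the `ppo-LunarLander-v2.zip` artifact uploaded in this commit.

```python
# Hedged sketch for the "Usage (with Stable-baselines3)" section left as TODO above.
# Assumption: the model is hosted at "r0in/ppo-LunarLander-v2" (the old card mentions
# 'r0in/ppo-lunarlander-v2'); adjust repo_id if the actual repository differs.
import gymnasium as gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.monitor import Monitor

# Download the zip artifact from the Hub and load it with SB3.
checkpoint = load_from_hub(
    repo_id="r0in/ppo-LunarLander-v2",   # assumed repo id
    filename="ppo-LunarLander-v2.zip",   # artifact uploaded in this commit
)
model = PPO.load(checkpoint)

# Roll out one episode with the deterministic policy.
env = Monitor(gym.make("LunarLander-v2"))
obs, info = env.reset()
done = False
while not done:
    action, _ = model.predict(obs, deterministic=True)
    obs, reward, terminated, truncated, info = env.step(action)
    done = terminated or truncated
env.close()
```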
config.json
CHANGED
@@ -1 +1 @@
-
{"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==", "__module__": "stable_baselines3.common.policies", "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function ActorCriticPolicy.__init__ at 0x7fcb1fc6b1c0>", "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7fcb1fc6b250>", "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7fcb1fc6b2e0>", "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7fcb1fc6b370>", "_build": "<function ActorCriticPolicy._build at 0x7fcb1fc6b400>", "forward": "<function ActorCriticPolicy.forward at 0x7fcb1fc6b490>", "extract_features": "<function ActorCriticPolicy.extract_features at 0x7fcb1fc6b520>", "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7fcb1fc6b5b0>", "_predict": "<function ActorCriticPolicy._predict at 0x7fcb1fc6b640>", "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7fcb1fc6b6d0>", "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7fcb1fc6b760>", "predict_values": "<function ActorCriticPolicy.predict_values at 0x7fcb1fc6b7f0>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x7fcb28da9fc0>"}, "verbose": 1, "policy_kwargs": {}, "num_timesteps": 1015808, "_total_timesteps": 1000000, "_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1706077419959376394, "learning_rate": 0.0003, "tensorboard_log": null, "_last_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": 
"gAWVdQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYAAgAAAAAAAE0dOL1x7Um5xPeNOyCDTjgEOx+62ymCuAAAgD8AAAAAmsdpva6NpLqNzzI4TUllM5TLjzpBS0y3AACAPwAAgD9m1kK7XHsPuh1GDzvAvBQ2rCJVO3hDKroAAIA/AACAP2aJNj2PXku6W6uYuy5ZgDhdCca6QgMkOQAAgD8AAIA/GmgNvYUzkLkw5sC4tJAgNYug3juqsZS0AACAPwAAgD+zzwO9FIKCuoTGgTu1CY04OvL+uiv/9LkAAIA/AACAP6bagL3sKf+5fuGkunSdSbbZFZc76rvAOQAAgD8AAIA/mkX6O0jDh7pdSfu6d7AetJ7irbozPBI6AACAPwAAgD+arss8VFxDPmcUDb2vMTO+KBCSvY1jI7wAAAAAAAAAAM0cczsUPIO6Kk0suzwwwLbZ9Jm6MrtIOgAAgD8AAIA/TS0tvYQNSj42LJS88VBpvoZEn7yqawa9AAAAAAAAAABmXZO816NHt337cLQ1rdgvdZlYOxwunDMAAIA/AACAP83RjjwK52652h7zuYmG+LMEC2e7RvUROQAAgD8AAIA/Zon4vMNVWLqoMD83+nzOMi6HqDvCgl62AACAPwAAgD/Ngca8Q/sCvNpr+j2R1wW9zQgtPSqJOD4AAIA/AACAPwB7vrz0BcG8bhOKPS4ctju45rG9IfoZvgAAgD8AAIA/lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksQSwiGlIwBQ5R0lFKULg=="}, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVgwAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSxCFlIwBQ5R0lFKULg=="}, "_last_original_obs": null, "_episode_num": 0, "use_sde": false, "sde_sample_freq": -1, "_current_progress_remaining": -0.015808000000000044, "_stats_window_size": 100, "ep_info_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVOwwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQGLg06xPfsOMAWyUTegDjAF0lEdAlYUGCEpRXXV9lChoBkdAXdQw22oegmgHTegDaAhHQJWH78WKuSx1fZQoaAZHQGCg7WEsasJoB03oA2gIR0CViVwEQoTgdX2UKGgGR0Bmt+1YyO7yaAdN6ANoCEdAlYv6NEPUa3V9lChoBkdAZWKpiI+GGmgHTegDaAhHQJWVpWluWKN1fZQoaAZHQGBKYNI9TxZoB03oA2gIR0CVlm22G7BgdX2UKGgGR0BxeTMY/FBIaAdNEgFoCEdAlasBeokzGnV9lChoBkdAZJm0MPSUkmgHTegDaAhHQJWvl+qioKl1fZQoaAZHQGBwUQkHD79oB03oA2gIR0CVsGGpMpPRdX2UKGgGR0Bm9ClpGnXNaAdN6ANoCEdAlbIctK7I1nV9lChoBkdAZ1gDkELYw2gHTegDaAhHQJWyby8SPEN1fZQoaAZHQE/RFKkEcKhoB0vQaAhHQJWz2E384xV1fZQoaAZHQGY2Onl4keJoB03oA2gIR0CVtFiqhlDndX2UKGgGR0BTbSqQzUI+aAdL72gIR0CVtZtapxWDdX2UKGgGR0Bj9ZJ2+wkgaAdN6ANoCEdAlbk9vKlpGnV9lChoBkdAQjKMBIWgvmgHS8VoCEdAlbpwR5C4SnV9lChoBkdAY/1l5GBnSWgHTegDaAhHQJW/f+wTufF1fZQoaAZHQGa9VTR6WxBoB03oA2gIR0CVwLIMjNY9dX2UKGgGR0BlO1aGHpKSaAdN6ANoCEdAlcbAN9YwI3V9lChoBkdAaE9pGnXNDGgHTegDaAhHQJXJpS/CZWt1fZQoaAZHQGe74hUzbexoB03oA2gIR0CVy57IT4+KdX2UKGgGR0BjoRNdqtYCaAdN6ANoCEdAldC5zLfUF3V9lChoBkdAYsDXPJJXhmgHTegDaAhHQJXUnhegL7Z1fZQoaAZHQE3MG9Htnf5oB0vWaAhHQJXZpntfG+91fZQoaAZHQGOSJUgjhUBoB03oA2gIR0CV4IKZDzAfdX2UKGgGR0Bi0pAprk8zaAdN6ANoCEdAlfbMpLEk0XV9lChoBkdAZPzg4Otnw2gHTegDaAhHQJX3yR7qptJ1fZQoaAZHQGQsx1oxpL5oB03oA2gIR0CV+lvfj0cwdX2UKGgGR0BkkD1f3N9qaAdN6ANoCEdAlfwrpFCswXV9lChoBkdAYpQpLmITG2gHTegDaAhHQJX84v8IiTt1fZQoaAZHQGNVpF1B+nZoB03oA2gIR0CV/oMBZIQOdX2UKGgGR0BkBIre67NCaAdN6ANoCEdAlgTo8dPtUnV9lChoBkdAaAH6uW8h92gHTegDaAhHQJYG4Py08eV1fZQoaAZHQGn+AZjx0+1oB03oA2gIR0CWDM9HMEA6dX2UKGgGR0BgDxRuTA32aAdN6ANoCEdAlg45OerdWXV9lChoBkdAcROkauOjqWgHTT8BaAhHQJYQdnlGPPt1fZQoaAZHQHHJZ/0/W2BoB03MAWgIR0CWE0phF3INdX2UKGgGR0BjVcn9ehPCaAdN6ANoCEdAlhRaN+9alnV9lChoBkdAaOFyksSTQmgHTegDaAhHQJYW+/rSmZV1fZQoaAZHQGN2AVwgkkdoB03oA2gIR0CWHWyckMTfdX2UKGgGR0BkMaGUOd5IaAdN6ANoCEdAliBm8h9srXV9lChoBkdAYuBTmW+oL2gHTegDaAhHQJYkuo0hvBJ1fZQoaAZHQHLJKcAiml9oB01tAmgIR0CWKJETxoZidX2UKGgGR0BjNrP2PDHfaAdN6ANoCEdAlitSVGCqZXV9lChoBkdAYli+0PYnOWgHTegDaAhHQJZCg1gpjMF1fZQoaAZHQGLxHcL0BfdoB03oA2gIR0CWRdJVbRnfdX2UKGgGR0Bk5K3EyckMaAdN6ANoCEdAlkdyWRigCnV9lChoBkdAZ7BJdSl3yWgHTegDaAhHQJZJs6bONYN1fZQoaAZHQHC13Gn4wh5oB03MAmgIR0CWSeuMdcSodX2UKGgGR0BWDIgV45cUaAdLsWgIR0CWS9sr/bTMdX2UKGgGR0Bdhp5eJHiFaAdN6ANoCEdAlk3w00m+kHV9lChoB
kdAY7phG6PKdWgHTegDaAhHQJZVCYOUdJd1fZQoaAZHQGNYvIOpbUxoB03oA2gIR0CWVj8VHnU2dX2UKGgGR0BnB3nwG4ZuaAdN6ANoCEdAlltmqDK5kXV9lChoBkdAYx85Fw1iv2gHTegDaAhHQJZcmV9nbqR1fZQoaAZHQGCa7GNrCWNoB03oA2gIR0CWYHKdQO4HdX2UKGgGR0BoG4atLcsUaAdN6ANoCEdAlmgfgNwzcnV9lChoBkdAbmkbZvkzXWgHTWkBaAhHQJZqYn9ehPF1fZQoaAZHQGR7JGnXNC9oB03oA2gIR0CWaqg/C66KdX2UKGgGR0BhLdxCIDYAaAdN6ANoCEdAlm5rJ8v25HV9lChoBkdAY11Tvy9VWGgHTegDaAhHQJZxsyAQQMB1fZQoaAZHQGYgv/JeVs1oB03oA2gIR0CWdCopx3mndX2UKGgGR0BgDxEH+qBFaAdN6ANoCEdAloyrR0EHMXV9lChoBkdAbyxakAPuomgHTQoCaAhHQJaNuvaDf3x1fZQoaAZHQGN+VclgMMJoB03oA2gIR0CWjta99MK1dX2UKGgGR0BmUprULDyfaAdN6ANoCEdAlpHOgxrSE3V9lChoBkdAXsnYFqzqr2gHTegDaAhHQJaSES26TW51fZQoaAZHQGG7Ukv9LpRoB03oA2gIR0CWlM/R3NcGdX2UKGgGR0BnfAmsvIwNaAdN6ANoCEdAlpbm6oVEeHV9lChoBkdAYEivQF9roGgHTegDaAhHQJafK8M/hVF1fZQoaAZHQHJ7rlA/s3RoB01aAWgIR0CWoWrv9cbBdX2UKGgGR0Byk/aTOgQIaAdNrQNoCEdAlqJueBg/knV9lChoBkdAZLRAbADaG2gHTegDaAhHQJakbo+wC8x1fZQoaAZHQG3R/E4vN/xoB01bA2gIR0CWpzuv2Xb/dX2UKGgGR0Bikf5aePJaaAdN6ANoCEdAlrCLRa5f+nV9lChoBkdAZ0ObLEDQq2gHTegDaAhHQJaw3Cbc45t1fZQoaAZHQGS5KZ2IO6NoB03oA2gIR0CWtKxDb8FZdX2UKGgGR0Byf329L6DXaAdNoQJoCEdAlrUKHXVbzXV9lChoBkdAcUHSpzcRDmgHTYwDaAhHQJa1zpNbkfd1fZQoaAZHQGWcmGucME1oB03oA2gIR0CWt6dSEUTMdX2UKGgGR0ByIKattALRaAdL82gIR0CW0k/Aj6eodX2UKGgGR0BxMfGza9K3aAdNfgJoCEdAltLyFPBSDXV9lChoBkdAaiJZ/0/W2GgHTegDaAhHQJbS/1Iy0rt1fZQoaAZHQF2cgpSaVlhoB03oA2gIR0CW1GNlAeJYdX2UKGgGR0BlHtdRiw0PaAdN6ANoCEdAltZCed07sHV9lChoBkdAXy4cXFcY7GgHTegDaAhHQJbYQDQqqfh1fZQoaAZHQGZQiosI3R5oB03oA2gIR0CW2ixkupS8dX2UKGgGR0BybaBRQ79yaAdNXQNoCEdAlt3Er5IpY3V9lChoBkdAS5UgU1yeZ2gHS6VoCEdAlt+6ziS7oXV9lChoBkdAcGhXHzYmLWgHTWYDaAhHQJbf7TZxrBV1fZQoaAZHQGRUqWTot+VoB03oA2gIR0CW4Xp9qk/KdX2UKGgGR0Bjx3CwbEP2aAdN6ANoCEdAluiH3Hq/unV9lChoBkdAU5whHLA572gHS9ZoCEdAluuY2jwhGHV9lChoBkdAch7iF0xM4GgHTUADaAhHQJbu3ggow251fZQoaAZHQGheXWvr4WVoB03oA2gIR0CW8588La24dX2UKGgGR0BhYJX4j8k2aAdN6ANoCEdAlvPj9KmKqHV9lChoBkdAUH8v8IiTuGgHS+RoCEdAlvhQGbCrLnV9lChoBkdAZTCq4pc5bWgHTegDaAhHQJb4ihM8HOd1fZQoaAZHQHGvrs8gZCRoB01IAWgIR0CW+gbSJCSidX2UKGgGR0BzOxOSGJvYaAdN5QNoCEdAlvpSMtK7I3VlLg=="}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 248, "observation_space": {":type:": "<class 'gymnasium.spaces.box.Box'>", ":serialized:": "gAWVcAIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWCAAAAAAAAAABAQEBAQEBAZRoB4wCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksIhZSMAUOUdJRSlIwNYm91bmRlZF9hYm92ZZRoECiWCAAAAAAAAAABAQEBAQEBAZRoFEsIhZRoGHSUUpSMBl9zaGFwZZRLCIWUjANsb3eUaBAoliAAAAAAAAAAAAC0wgAAtMIAAKDAAACgwNsPScAAAKDAAAAAgAAAAICUaApLCIWUaBh0lFKUjARoaWdolGgQKJYgAAAAAAAAAAAAtEIAALRCAACgQAAAoEDbD0lAAACgQAAAgD8AAIA/lGgKSwiFlGgYdJRSlIwIbG93X3JlcHKUjFtbLTkwLiAgICAgICAgLTkwLiAgICAgICAgIC01LiAgICAgICAgIC01LiAgICAgICAgIC0zLjE0MTU5MjcgIC01LgogIC0wLiAgICAgICAgIC0wLiAgICAgICBdlIwJaGlnaF9yZXBylIxTWzkwLiAgICAgICAgOTAuICAgICAgICAgNS4gICAgICAgICA1LiAgICAgICAgIDMuMTQxNTkyNyAgNS4KICAxLiAgICAgICAgIDEuICAgICAgIF2UjApfbnBfcmFuZG9tlE51Yi4=", "dtype": "float32", "bounded_below": "[ True True True True True True True True]", "bounded_above": "[ True True True True True True True True]", "_shape": [8], "low": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]", "high": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. ]", "low_repr": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]", "high_repr": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. 
]", "_np_random": null}, "action_space": {":type:": "<class 'gymnasium.spaces.discrete.Discrete'>", ":serialized:": "gAWV1QAAAAAAAACMGWd5bW5hc2l1bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYkMIBAAAAAAAAACUhpRSlIwFc3RhcnSUaAhoDkMIAAAAAAAAAACUhpRSlIwGX3NoYXBllCloCmgOjApfbnBfcmFuZG9tlE51Yi4=", "n": "4", "start": "0", "_shape": [], "dtype": "int64", "_np_random": null}, "n_envs": 16, "n_steps": 1024, "gamma": 0.999, "gae_lambda": 0.98, "ent_coef": 0.01, "vf_coef": 0.5, "max_grad_norm": 0.5, "batch_size": 64, "n_epochs": 4, "clip_range": {":type:": "<class 'function'>", ":serialized:": "gAWVxQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSS91c3IvbG9jYWwvbGliL3B5dGhvbjMuMTAvZGlzdC1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjEkvdXNyL2xvY2FsL2xpYi9weXRob24zLjEwL2Rpc3QtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUjBxjbG91ZHBpY2tsZS5jbG91ZHBpY2tsZV9mYXN0lIwSX2Z1bmN0aW9uX3NldHN0YXRllJOUaB99lH2UKGgWaA2MDF9fcXVhbG5hbWVfX5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOURz/JmZmZmZmahZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu"}, "clip_range_vf": null, "normalize_advantage": true, "target_kl": null, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWVxQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSS91c3IvbG9jYWwvbGliL3B5dGhvbjMuMTAvZGlzdC1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjEkvdXNyL2xvY2FsL2xpYi9weXRob24zLjEwL2Rpc3QtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUjBxjbG91ZHBpY2tsZS5jbG91ZHBpY2tsZV9mYXN0lIwSX2Z1bmN0aW9uX3NldHN0YXRllJOUaB99lH2UKGgWaA2MDF9fcXVhbG5hbWVfX5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOURz8zqSowVTJhhZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu"}, "system_info": {"OS": "Linux-6.1.58+-x86_64-with-glibc2.35 # 1 SMP PREEMPT_DYNAMIC Sat Nov 18 15:31:17 UTC 2023", "Python": "3.10.12", "Stable-Baselines3": "2.0.0a5", "PyTorch": "2.1.0+cu121", "GPU Enabled": "True", "Numpy": "1.23.5", "Cloudpickle": "2.2.1", "Gymnasium": "0.28.1", "OpenAI Gym": "0.25.2"}}
+
{"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==", "__module__": "stable_baselines3.common.policies", "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function ActorCriticPolicy.__init__ at 0x7b27ce13a830>", "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7b27ce13a8c0>", "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7b27ce13a950>", "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7b27ce13a9e0>", "_build": "<function ActorCriticPolicy._build at 0x7b27ce13aa70>", "forward": "<function ActorCriticPolicy.forward at 0x7b27ce13ab00>", "extract_features": "<function ActorCriticPolicy.extract_features at 0x7b27ce13ab90>", "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7b27ce13ac20>", "_predict": "<function ActorCriticPolicy._predict at 0x7b27ce13acb0>", "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7b27ce13ad40>", "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7b27ce13add0>", "predict_values": "<function ActorCriticPolicy.predict_values at 0x7b27ce13ae60>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x7b27ce2d1600>"}, "verbose": 1, "policy_kwargs": {}, "num_timesteps": 1015808, "_total_timesteps": 1000000, "_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1709604379319716265, "learning_rate": 0.0003, "tensorboard_log": null, "_last_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": 
"gAWVdQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYAAgAAAAAAAM1Uuzz79rQ94msZPmbNTr7QJag9kM50PQAAAAAAAAAA5mkAvU9EqD6PFca8K0otvu2Qmb0q3/K8AAAAAAAAAACAFXi97R4OP6UW6LwmUje+BDnYvc76Xj0AAAAAAAAAAMA4+L33F4M/W+IPvtp4mb6HLsK9qHqBvQAAAAAAAAAAs5BZPbyXmT9DJQk+qgOIvusa8TzsTyc+AAAAAAAAAACaeco6E7lMP/3H5LqXQE2+n9+VvJi2Mb4AAAAAAAAAAIADm73xRCc/rg4gvWHxG7707LC9WBSQuwAAAAAAAAAAJuTlvRNZBz910S89eRiFvjFEJL3mLkm8AAAAAAAAAACacQU9r8ShPyNWWT7iIp2+hXfXO747Sj0AAAAAAAAAABosTL0Ty88+kxouvVLjhr513Jy9vfuDvQAAAAAAAAAAZt1Kvf3OGz791A4+oec7vlne6jwPuDc9AAAAAAAAAAAGOwy+zP9AP4LzzrwLAnq+7rCOvbCBkr0AAAAAAAAAAMaRq77Vbz8/yp+jPXyZF74qeyS9MEWovQAAAAAAAAAA5hdCvR8JbD6GcGM9HpdUvmgoFL2/hjg9AAAAAAAAAAAaDJ09H72auU7EIbr3FGa0f27JuhqlPjkAAAAAAACAPzN4Aj12RB0/1l4fvs7KRb4h0iW9CxM7ugAAAAAAAAAAlIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksQSwiGlIwBQ5R0lFKULg=="}, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVgwAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSxCFlIwBQ5R0lFKULg=="}, "_last_original_obs": null, "_episode_num": 0, "use_sde": false, "sde_sample_freq": -1, "_current_progress_remaining": -0.015808000000000044, "_stats_window_size": 100, "ep_info_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVRAwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQHGuv0Zm7J6MAWyUTSECjAF0lEdAk1bL3TNMXnV9lChoBkdAbGs8La24NWgHTWMBaAhHQJNYCVUuL751fZQoaAZHQG3bxs2vStxoB018AWgIR0CTWHBE8aGYdX2UKGgGR0BwA53iaRZEaAdNggFoCEdAk1mV3t8eCHV9lChoBkdAcdQ+jM3ZPGgHTXMBaAhHQJNa4BEKE391fZQoaAZHQHCT7YXfqHJoB017AWgIR0CTW47l7tzCdX2UKGgGR0ByopmFrVOLaAdNeAFoCEdAk1vMry1/lXV9lChoBkdAXBfsZ5zHTGgHTegDaAhHQJNdXd30PH11fZQoaAZHQHH3/+GXXy1oB03DAWgIR0CTXbAggX/HdX2UKGgGR0BvThmVZ9uxaAdNfQFoCEdAk14zollbvHV9lChoBkdAbvtP6be/H2gHTcMBaAhHQJNeVYbKifx1fZQoaAZHQHDoSHM2WIJoB02YAWgIR0CTYMBw++uedX2UKGgGR0Bx1xPgvUSaaAdNaQFoCEdAk3U3vYvnKXV9lChoBkdAcRXqnFYMfGgHTXIBaAhHQJN2god+5OJ1fZQoaAZHQELYYLLIPsloB004AWgIR0CTdqTPBzmwdX2UKGgGR0A+Ug3974SIaAdNKQFoCEdAk3gZFG5MDnV9lChoBkdAa6jaB7NSqGgHTVoBaAhHQJN470pVjqh1fZQoaAZHQHJyzVx0dR1oB005AWgIR0CTedzollbvdX2UKGgGR0BvgnXAdn01aAdNdAFoCEdAk3pkJv5xi3V9lChoBkdAabXBkZrHl2gHTQgDaAhHQJN7CYsunMt1fZQoaAZHQGwnFQMx46hoB01AAWgIR0CTfEN+b3GodX2UKGgGR0BsdF3Ux20RaAdNRgFoCEdAk315x//ecnV9lChoBkdAb9Oj9n9NvmgHTZQBaAhHQJN99gc94eN1fZQoaAZHQHCygK8cuJ1oB02TAWgIR0CTfh2OQyRCdX2UKGgGR0ByeLbmEGqxaAdNNwFoCEdAk39Ojh1klXV9lChoBkdAcb4dV/+bVmgHTZ0BaAhHQJOAJbKRuCR1fZQoaAZHQG9pRbjcVQBoB03SAWgIR0CTgYGZ/kNndX2UKGgGR0BxmcpXp4bCaAdNTgFoCEdAk4HOmNzbOHV9lChoBkdAcYbKKHfuTmgHTUkBaAhHQJOCsq0+kgx1fZQoaAZHQHFoqRU3n6loB013AWgIR0CThIEvTPSldX2UKGgGR0BG3VyvLX+VaAdNJgFoCEdAk4TLEpAlfXV9lChoBkdAcmJdCE6DG2gHTUsBaAhHQJOE8Bnzxw11fZQoaAZHQGIShhpg1FZoB03oA2gIR0CThVsq8UVSdX2UKGgGR0Bw154ptrKvaAdNZgFoCEdAk4aVBMSK33V9lChoBkdAcNazqbBoEmgHTVQBaAhHQJOG8RHww0x1fZQoaAZHQFIoiONo8IRoB00JAWgIR0CThyEWZZ0TdX2UKGgGR0Bsl0l5WzWxaAdNUQFoCEdAk4juEqUeMnV9lChoBkdAcHxtHhCMP2gHTXYBaAhHQJOJJIXj2jB1fZQoaAZHQHDSRKpT/AFoB03gAWgIR0CTiU23KB/adX2UKGgGR0BwxxwFTvRaaAdNYAFoCEdAk4te/k/8mHV9lChoBkdAbw3z0Yj0MGgHTZgBaAhHQJOL1KmKqGV1fZQoaAZHQHD4pavA44poB01yAWgIR0CTjpSF49owdX2UKGgGR0BuEzAnDziCaAdNcAFoCEdAk5ACkj5bhXV9lChoBkdAb4KjB2wFDGgHTU8BaAhHQJOQttdiUgV1fZQoaAZHQG6rRpUPxx1oB01eAWgIR0CTkm0QbuMNdX2UKGgGR0BtLTCWNWELaAdNSgFoCEdAk5M5F1B+nnV9lChoBkdAcBMN0vGp/GgHTXoBaAhHQJOTOiL2pQ11fZQoaAZHQHG616qsEJVoB007AWgIR0CTk0hV2icodX2UKGgGR0BxIlzMibDuaAdNigFoCEdAk5PLQHAymHV9lChoBkdAb7w2itaIN2gHTV8BaAhHQJOUyraM72d1fZQoaAZHQEczthuwX69oB00xAWgIR0CTlXaUA1ejd
X2UKGgGR0BwuCzLOiWWaAdNRgJoCEdAk5aPOD8Lr3V9lChoBkdAb0D4nndO7GgHTYEBaAhHQJOZzbrTpgV1fZQoaAZHQHJWTXvphWpoB01QAWgIR0CTmyXZXdTHdX2UKGgGR0Bu4/+uNgjRaAdNgAJoCEdAk5vCMHbAUXV9lChoBkdAcotSKm8/U2gHTa8BaAhHQJOu78YQ8Ol1fZQoaAZHQGv2QzUI9kloB02FAWgIR0CTsLtrsSkCdX2UKGgGR0BvWJOWSlnAaAdNZgFoCEdAk7DRdyDIzXV9lChoBkdAcmKQPI4lyGgHTXIBaAhHQJOx8nw5NoJ1fZQoaAZHQHB+GlqJuVJoB01DAWgIR0CTsk1B+nZTdX2UKGgGR0BJQM5fdAPeaAdNLQFoCEdAk7KlIAfdRHV9lChoBkdAbLsna37UG2gHTWMBaAhHQJOzeR7qptJ1fZQoaAZHQHBdnYUWVNZoB011AWgIR0CTs4R7JGONdX2UKGgGR0BywRrULDyfaAdNaQFoCEdAk7QjzND+i3V9lChoBkdAcJSowEhaDGgHTUoBaAhHQJO0OJwbVBl1fZQoaAZHQHDlw4jrzGxoB008AWgIR0CTtIyiEg4fdX2UKGgGR0BwrqApazNVaAdNlwFoCEdAk7U18kUsWnV9lChoBkdAb0GFjd56dGgHTUMBaAhHQJO4IXUH6dl1fZQoaAZHQGyprAxi5NJoB01sAWgIR0CTuDSVGCqZdX2UKGgGR0BrtVJrcj7iaAdNYQFoCEdAk7ihn8Koh3V9lChoBkdAb02wB5ooNWgHTYMBaAhHQJO9HoJRfnh1fZQoaAZHQGzU3rdFfAtoB01dAWgIR0CTvWal1r6+dX2UKGgGR0ByIkBbOeJ6aAdNPwFoCEdAk723PzFuN3V9lChoBkdAa01KFqSHM2gHTXMBaAhHQJO+StA9mpV1fZQoaAZHQHG3XKKYRd1oB01bAWgIR0CTvo7kn1FpdX2UKGgGR0A04VUuL740aAdNPwFoCEdAk78lKwpvxnV9lChoBkdAb1sMKCxu9GgHTWIBaAhHQJO/m7VawEB1fZQoaAZHQGuP2rfcesBoB01XAWgIR0CTwA4Uvf0mdX2UKGgGR0Bw6zl90A93aAdNWwFoCEdAk8DfnW8RMHV9lChoBkdAbvze1rqMWGgHTU0BaAhHQJPBoaZQYUF1fZQoaAZHQG98aUJOWSloB01mAWgIR0CTwbsUIsy0dX2UKGgGR0BhNS6pYLb6aAdN6ANoCEdAk8HW+PBBRnV9lChoBkdAcB/ACGN70GgHTZ8BaAhHQJPDMrNGEwp1fZQoaAZHQGzYhjFyaNNoB01SAWgIR0CTxTd/8VHndX2UKGgGR0Bvd43gk1MuaAdNhgFoCEdAk8eS4z7/GXV9lChoBkdAbyrxIatLc2gHTVMBaAhHQJPMEF3Y+St1fZQoaAZHQG7PqFIuoP1oB01VAWgIR0CTzGob4rSWdX2UKGgGR0BtMW5OJtSAaAdNUwFoCEdAk80yt3fQ8nV9lChoBkdAcWEMNtqHoGgHTWABaAhHQJPOom4RVZN1fZQoaAZHQG2A3VTaTOhoB01WAWgIR0CTzrDNQj2SdX2UKGgGR0BxOC2CuloEaAdNVQFoCEdAk9Ahw2l2vHV9lChoBkdAcbW889wFT2gHTaUBaAhHQJPQQ7muDBd1fZQoaAZHQGyjzDn/1g9oB01KAWgIR0CT0IBd2PkrdX2UKGgGR0BwTez/p+tsaAdNTwFoCEdAk9DMwYcebXV9lChoBkdAbyvdi2DxsmgHTa8BaAhHQJPRUtCiRGN1fZQoaAZHQG56y7f51vFoB02MAWgIR0CT0Vplz2eydX2UKGgGR0Byg38qFyq/aAdNgwFoCEdAk9KTe9Ba93V9lChoBkdAb7oTtb9qDmgHTYUBaAhHQJPULDKoybh1fZQoaAZHQGp4A13t8eFoB01vAWgIR0CT1Q2/i5uqdX2UKGgGR0BxG9fAsTWYaAdNRwFoCEdAk9VepOvdM3VlLg=="}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 248, "observation_space": {":type:": "<class 'gymnasium.spaces.box.Box'>", ":serialized:": "gAWVdgIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWCAAAAAAAAAABAQEBAQEBAZRoCIwCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksIhZSMAUOUdJRSlIwNYm91bmRlZF9hYm92ZZRoESiWCAAAAAAAAAABAQEBAQEBAZRoFUsIhZRoGXSUUpSMBl9zaGFwZZRLCIWUjANsb3eUaBEoliAAAAAAAAAAAAC0wgAAtMIAAKDAAACgwNsPScAAAKDAAAAAgAAAAICUaAtLCIWUaBl0lFKUjARoaWdolGgRKJYgAAAAAAAAAAAAtEIAALRCAACgQAAAoEDbD0lAAACgQAAAgD8AAIA/lGgLSwiFlGgZdJRSlIwIbG93X3JlcHKUjFtbLTkwLiAgICAgICAgLTkwLiAgICAgICAgIC01LiAgICAgICAgIC01LiAgICAgICAgIC0zLjE0MTU5MjcgIC01LgogIC0wLiAgICAgICAgIC0wLiAgICAgICBdlIwJaGlnaF9yZXBylIxTWzkwLiAgICAgICAgOTAuICAgICAgICAgNS4gICAgICAgICA1LiAgICAgICAgIDMuMTQxNTkyNyAgNS4KICAxLiAgICAgICAgIDEuICAgICAgIF2UjApfbnBfcmFuZG9tlE51Yi4=", "dtype": "float32", "bounded_below": "[ True True True True True True True True]", "bounded_above": "[ True True True True True True True True]", "_shape": [8], "low": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]", "high": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. ]", "low_repr": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]", "high_repr": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. 
]", "_np_random": null}, "action_space": {":type:": "<class 'gymnasium.spaces.discrete.Discrete'>", ":serialized:": "gAWV2wAAAAAAAACMGWd5bW5hc2l1bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYkMIBAAAAAAAAACUhpRSlIwFc3RhcnSUaAhoDkMIAAAAAAAAAACUhpRSlIwGX3NoYXBllCmMBWR0eXBllGgOjApfbnBfcmFuZG9tlE51Yi4=", "n": "4", "start": "0", "_shape": [], "dtype": "int64", "_np_random": null}, "n_envs": 16, "n_steps": 1024, "gamma": 0.999, "gae_lambda": 0.98, "ent_coef": 0.01, "vf_coef": 0.5, "max_grad_norm": 0.5, "batch_size": 64, "n_epochs": 4, "clip_range": {":type:": "<class 'function'>", ":serialized:": "gAWVxQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSS91c3IvbG9jYWwvbGliL3B5dGhvbjMuMTAvZGlzdC1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjEkvdXNyL2xvY2FsL2xpYi9weXRob24zLjEwL2Rpc3QtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUjBxjbG91ZHBpY2tsZS5jbG91ZHBpY2tsZV9mYXN0lIwSX2Z1bmN0aW9uX3NldHN0YXRllJOUaB99lH2UKGgWaA2MDF9fcXVhbG5hbWVfX5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOURz/JmZmZmZmahZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu"}, "clip_range_vf": null, "normalize_advantage": true, "target_kl": null, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWVxQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSS91c3IvbG9jYWwvbGliL3B5dGhvbjMuMTAvZGlzdC1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjEkvdXNyL2xvY2FsL2xpYi9weXRob24zLjEwL2Rpc3QtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUjBxjbG91ZHBpY2tsZS5jbG91ZHBpY2tsZV9mYXN0lIwSX2Z1bmN0aW9uX3NldHN0YXRllJOUaB99lH2UKGgWaA2MDF9fcXVhbG5hbWVfX5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOURz8zqSowVTJhhZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu"}, "system_info": {"OS": "Linux-6.1.58+-x86_64-with-glibc2.35 # 1 SMP PREEMPT_DYNAMIC Sat Nov 18 15:31:17 UTC 2023", "Python": "3.10.12", "Stable-Baselines3": "2.0.0a5", "PyTorch": "2.1.0+cu121", "GPU Enabled": "True", "Numpy": "1.25.2", "Cloudpickle": "2.2.1", "Gymnasium": "0.28.1", "OpenAI Gym": "0.25.2"}}
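For reference, the new config.json records this run's SB3 setup: 16 parallel envs, n_steps=1024, batch_size=64, n_epochs=4, gamma=0.999, gae_lambda=0.98, ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, learning_rate=0.0003, roughly one million timesteps. Below is a hedged sketch of a training script with those settings; `clip_range=0.2` is an assumption, since the config stores the clip range only as a pickled constant schedule.

```python
# Sketch of a training run using the hyperparameters stored in the new config.json.
# clip_range=0.2 is assumed (the config serializes it as a constant schedule); the
# other values are read directly off the config.
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env

vec_env = make_vec_env("LunarLander-v2", n_envs=16)   # config: "n_envs": 16

model = PPO(
    "MlpPolicy",              # ActorCriticPolicy on vector observations
    vec_env,
    n_steps=1024,
    batch_size=64,
    n_epochs=4,
    gamma=0.999,
    gae_lambda=0.98,
    ent_coef=0.01,
    vf_coef=0.5,
    max_grad_norm=0.5,
    learning_rate=3e-4,
    clip_range=0.2,           # assumed
    verbose=1,
)
model.learn(total_timesteps=1_000_000)  # config: "_total_timesteps": 1000000
model.save("ppo-LunarLander-v2")
```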
ppo-LunarLander-v2.zip
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:4e20e0d98cd1a5831b88b813d807519e0d7430cf768f3d9445d6149ae4ab6858
+size 148088
ppo-LunarLander-v2/data
CHANGED
@@ -4,20 +4,20 @@
":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==",
"__module__": "stable_baselines3.common.policies",
"__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
-"__init__": "<function ActorCriticPolicy.__init__ at
-"_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at
-"reset_noise": "<function ActorCriticPolicy.reset_noise at
-"_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at
-"_build": "<function ActorCriticPolicy._build at
-"forward": "<function ActorCriticPolicy.forward at
-"extract_features": "<function ActorCriticPolicy.extract_features at
-"_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at
-"_predict": "<function ActorCriticPolicy._predict at
-"evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at
-"get_distribution": "<function ActorCriticPolicy.get_distribution at
-"predict_values": "<function ActorCriticPolicy.predict_values at
 "__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc._abc_data object at
 },
 "verbose": 1,
 "policy_kwargs": {},
@@ -26,12 +26,12 @@
 "_num_timesteps_at_start": 0,
 "seed": null,
 "action_noise": null,
-"start_time":
 "learning_rate": 0.0003,
 "tensorboard_log": null,
 "_last_obs": {
 ":type:": "<class 'numpy.ndarray'>",
-":serialized:": "
 },
 "_last_episode_starts": {
 ":type:": "<class 'numpy.ndarray'>",
@@ -45,7 +45,7 @@
 "_stats_window_size": 100,
 "ep_info_buffer": {
 ":type:": "<class 'collections.deque'>",
-":serialized:": "
 },
 "ep_success_buffer": {
 ":type:": "<class 'collections.deque'>",
@@ -54,7 +54,7 @@
 "_n_updates": 248,
 "observation_space": {
 ":type:": "<class 'gymnasium.spaces.box.Box'>",
-":serialized:": "
 "dtype": "float32",
 "bounded_below": "[ True True True True True True True True]",
 "bounded_above": "[ True True True True True True True True]",
@@ -69,7 +69,7 @@
 },
 "action_space": {
 ":type:": "<class 'gymnasium.spaces.discrete.Discrete'>",
-":serialized:": "
 "n": "4",
 "start": "0",
 "_shape": [],
":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==",
"__module__": "stable_baselines3.common.policies",
"__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
+"__init__": "<function ActorCriticPolicy.__init__ at 0x7b27ce13a830>",
+"_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7b27ce13a8c0>",
+"reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7b27ce13a950>",
+"_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7b27ce13a9e0>",
+"_build": "<function ActorCriticPolicy._build at 0x7b27ce13aa70>",
+"forward": "<function ActorCriticPolicy.forward at 0x7b27ce13ab00>",
+"extract_features": "<function ActorCriticPolicy.extract_features at 0x7b27ce13ab90>",
+"_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7b27ce13ac20>",
+"_predict": "<function ActorCriticPolicy._predict at 0x7b27ce13acb0>",
+"evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7b27ce13ad40>",
+"get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7b27ce13add0>",
+"predict_values": "<function ActorCriticPolicy.predict_values at 0x7b27ce13ae60>",
 "__abstractmethods__": "frozenset()",
+"_abc_impl": "<_abc._abc_data object at 0x7b27ce2d1600>"
 },
 "verbose": 1,
 "policy_kwargs": {},
 "_num_timesteps_at_start": 0,
 "seed": null,
 "action_noise": null,
+"start_time": 1709604379319716265,
 "learning_rate": 0.0003,
 "tensorboard_log": null,
 "_last_obs": {
 ":type:": "<class 'numpy.ndarray'>",
+
":serialized:": "gAWVdQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYAAgAAAAAAAM1Uuzz79rQ94msZPmbNTr7QJag9kM50PQAAAAAAAAAA5mkAvU9EqD6PFca8K0otvu2Qmb0q3/K8AAAAAAAAAACAFXi97R4OP6UW6LwmUje+BDnYvc76Xj0AAAAAAAAAAMA4+L33F4M/W+IPvtp4mb6HLsK9qHqBvQAAAAAAAAAAs5BZPbyXmT9DJQk+qgOIvusa8TzsTyc+AAAAAAAAAACaeco6E7lMP/3H5LqXQE2+n9+VvJi2Mb4AAAAAAAAAAIADm73xRCc/rg4gvWHxG7707LC9WBSQuwAAAAAAAAAAJuTlvRNZBz910S89eRiFvjFEJL3mLkm8AAAAAAAAAACacQU9r8ShPyNWWT7iIp2+hXfXO747Sj0AAAAAAAAAABosTL0Ty88+kxouvVLjhr513Jy9vfuDvQAAAAAAAAAAZt1Kvf3OGz791A4+oec7vlne6jwPuDc9AAAAAAAAAAAGOwy+zP9AP4LzzrwLAnq+7rCOvbCBkr0AAAAAAAAAAMaRq77Vbz8/yp+jPXyZF74qeyS9MEWovQAAAAAAAAAA5hdCvR8JbD6GcGM9HpdUvmgoFL2/hjg9AAAAAAAAAAAaDJ09H72auU7EIbr3FGa0f27JuhqlPjkAAAAAAACAPzN4Aj12RB0/1l4fvs7KRb4h0iW9CxM7ugAAAAAAAAAAlIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksQSwiGlIwBQ5R0lFKULg=="
 },
 "_last_episode_starts": {
 ":type:": "<class 'numpy.ndarray'>",
 "_stats_window_size": 100,
 "ep_info_buffer": {
 ":type:": "<class 'collections.deque'>",
+
":serialized:": "gAWVRAwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQHGuv0Zm7J6MAWyUTSECjAF0lEdAk1bL3TNMXnV9lChoBkdAbGs8La24NWgHTWMBaAhHQJNYCVUuL751fZQoaAZHQG3bxs2vStxoB018AWgIR0CTWHBE8aGYdX2UKGgGR0BwA53iaRZEaAdNggFoCEdAk1mV3t8eCHV9lChoBkdAcdQ+jM3ZPGgHTXMBaAhHQJNa4BEKE391fZQoaAZHQHCT7YXfqHJoB017AWgIR0CTW47l7tzCdX2UKGgGR0ByopmFrVOLaAdNeAFoCEdAk1vMry1/lXV9lChoBkdAXBfsZ5zHTGgHTegDaAhHQJNdXd30PH11fZQoaAZHQHH3/+GXXy1oB03DAWgIR0CTXbAggX/HdX2UKGgGR0BvThmVZ9uxaAdNfQFoCEdAk14zollbvHV9lChoBkdAbvtP6be/H2gHTcMBaAhHQJNeVYbKifx1fZQoaAZHQHDoSHM2WIJoB02YAWgIR0CTYMBw++uedX2UKGgGR0Bx1xPgvUSaaAdNaQFoCEdAk3U3vYvnKXV9lChoBkdAcRXqnFYMfGgHTXIBaAhHQJN2god+5OJ1fZQoaAZHQELYYLLIPsloB004AWgIR0CTdqTPBzmwdX2UKGgGR0A+Ug3974SIaAdNKQFoCEdAk3gZFG5MDnV9lChoBkdAa6jaB7NSqGgHTVoBaAhHQJN470pVjqh1fZQoaAZHQHJyzVx0dR1oB005AWgIR0CTedzollbvdX2UKGgGR0BvgnXAdn01aAdNdAFoCEdAk3pkJv5xi3V9lChoBkdAabXBkZrHl2gHTQgDaAhHQJN7CYsunMt1fZQoaAZHQGwnFQMx46hoB01AAWgIR0CTfEN+b3GodX2UKGgGR0BsdF3Ux20RaAdNRgFoCEdAk315x//ecnV9lChoBkdAb9Oj9n9NvmgHTZQBaAhHQJN99gc94eN1fZQoaAZHQHCygK8cuJ1oB02TAWgIR0CTfh2OQyRCdX2UKGgGR0ByeLbmEGqxaAdNNwFoCEdAk39Ojh1klXV9lChoBkdAcb4dV/+bVmgHTZ0BaAhHQJOAJbKRuCR1fZQoaAZHQG9pRbjcVQBoB03SAWgIR0CTgYGZ/kNndX2UKGgGR0BxmcpXp4bCaAdNTgFoCEdAk4HOmNzbOHV9lChoBkdAcYbKKHfuTmgHTUkBaAhHQJOCsq0+kgx1fZQoaAZHQHFoqRU3n6loB013AWgIR0CThIEvTPSldX2UKGgGR0BG3VyvLX+VaAdNJgFoCEdAk4TLEpAlfXV9lChoBkdAcmJdCE6DG2gHTUsBaAhHQJOE8Bnzxw11fZQoaAZHQGIShhpg1FZoB03oA2gIR0CThVsq8UVSdX2UKGgGR0Bw154ptrKvaAdNZgFoCEdAk4aVBMSK33V9lChoBkdAcNazqbBoEmgHTVQBaAhHQJOG8RHww0x1fZQoaAZHQFIoiONo8IRoB00JAWgIR0CThyEWZZ0TdX2UKGgGR0Bsl0l5WzWxaAdNUQFoCEdAk4juEqUeMnV9lChoBkdAcHxtHhCMP2gHTXYBaAhHQJOJJIXj2jB1fZQoaAZHQHDSRKpT/AFoB03gAWgIR0CTiU23KB/adX2UKGgGR0BwxxwFTvRaaAdNYAFoCEdAk4te/k/8mHV9lChoBkdAbw3z0Yj0MGgHTZgBaAhHQJOL1KmKqGV1fZQoaAZHQHD4pavA44poB01yAWgIR0CTjpSF49owdX2UKGgGR0BuEzAnDziCaAdNcAFoCEdAk5ACkj5bhXV9lChoBkdAb4KjB2wFDGgHTU8BaAhHQJOQttdiUgV1fZQoaAZHQG6rRpUPxx1oB01eAWgIR0CTkm0QbuMNdX2UKGgGR0BtLTCWNWELaAdNSgFoCEdAk5M5F1B+nnV9lChoBkdAcBMN0vGp/GgHTXoBaAhHQJOTOiL2pQ11fZQoaAZHQHG616qsEJVoB007AWgIR0CTk0hV2icodX2UKGgGR0BxIlzMibDuaAdNigFoCEdAk5PLQHAymHV9lChoBkdAb7w2itaIN2gHTV8BaAhHQJOUyraM72d1fZQoaAZHQEczthuwX69oB00xAWgIR0CTlXaUA1ejdX2UKGgGR0BwuCzLOiWWaAdNRgJoCEdAk5aPOD8Lr3V9lChoBkdAb0D4nndO7GgHTYEBaAhHQJOZzbrTpgV1fZQoaAZHQHJWTXvphWpoB01QAWgIR0CTmyXZXdTHdX2UKGgGR0Bu4/+uNgjRaAdNgAJoCEdAk5vCMHbAUXV9lChoBkdAcotSKm8/U2gHTa8BaAhHQJOu78YQ8Ol1fZQoaAZHQGv2QzUI9kloB02FAWgIR0CTsLtrsSkCdX2UKGgGR0BvWJOWSlnAaAdNZgFoCEdAk7DRdyDIzXV9lChoBkdAcmKQPI4lyGgHTXIBaAhHQJOx8nw5NoJ1fZQoaAZHQHB+GlqJuVJoB01DAWgIR0CTsk1B+nZTdX2UKGgGR0BJQM5fdAPeaAdNLQFoCEdAk7KlIAfdRHV9lChoBkdAbLsna37UG2gHTWMBaAhHQJOzeR7qptJ1fZQoaAZHQHBdnYUWVNZoB011AWgIR0CTs4R7JGONdX2UKGgGR0BywRrULDyfaAdNaQFoCEdAk7QjzND+i3V9lChoBkdAcJSowEhaDGgHTUoBaAhHQJO0OJwbVBl1fZQoaAZHQHDlw4jrzGxoB008AWgIR0CTtIyiEg4fdX2UKGgGR0BwrqApazNVaAdNlwFoCEdAk7U18kUsWnV9lChoBkdAb0GFjd56dGgHTUMBaAhHQJO4IXUH6dl1fZQoaAZHQGyprAxi5NJoB01sAWgIR0CTuDSVGCqZdX2UKGgGR0BrtVJrcj7iaAdNYQFoCEdAk7ihn8Koh3V9lChoBkdAb02wB5ooNWgHTYMBaAhHQJO9HoJRfnh1fZQoaAZHQGzU3rdFfAtoB01dAWgIR0CTvWal1r6+dX2UKGgGR0ByIkBbOeJ6aAdNPwFoCEdAk723PzFuN3V9lChoBkdAa01KFqSHM2gHTXMBaAhHQJO+StA9mpV1fZQoaAZHQHG3XKKYRd1oB01bAWgIR0CTvo7kn1FpdX2UKGgGR0A04VUuL740aAdNPwFoCEdAk78lKwpvxnV9lChoBkdAb1sMKCxu9GgHTWIBaAhHQJO/m7VawEB1fZQoaAZHQGuP2rfcesBoB01XAWgIR0CTwA4Uvf0mdX2UKGgGR0Bw6zl90A93aAdNWwFoCEdAk8DfnW8RMHV9lChoBkdAbvze1rqMWGgHTU0BaAhHQJPBoaZQYUF1fZQoaAZHQG98aUJOWSloB01mAWgIR0CTwbsUIsy0dX2UKGgGR0BhNS6pYLb6aAdN6ANoCEdAk8HW+PBBRnV9lChoBkdAcB/ACGN70GgHTZ8BaAhHQJPDMrNGEwp1fZQoaAZHQGzYhjFyaNNoB01SAWgIR0CTxTd/8VHndX
2UKGgGR0Bvd43gk1MuaAdNhgFoCEdAk8eS4z7/GXV9lChoBkdAbyrxIatLc2gHTVMBaAhHQJPMEF3Y+St1fZQoaAZHQG7PqFIuoP1oB01VAWgIR0CTzGob4rSWdX2UKGgGR0BtMW5OJtSAaAdNUwFoCEdAk80yt3fQ8nV9lChoBkdAcWEMNtqHoGgHTWABaAhHQJPOom4RVZN1fZQoaAZHQG2A3VTaTOhoB01WAWgIR0CTzrDNQj2SdX2UKGgGR0BxOC2CuloEaAdNVQFoCEdAk9Ahw2l2vHV9lChoBkdAcbW889wFT2gHTaUBaAhHQJPQQ7muDBd1fZQoaAZHQGyjzDn/1g9oB01KAWgIR0CT0IBd2PkrdX2UKGgGR0BwTez/p+tsaAdNTwFoCEdAk9DMwYcebXV9lChoBkdAbyvdi2DxsmgHTa8BaAhHQJPRUtCiRGN1fZQoaAZHQG56y7f51vFoB02MAWgIR0CT0Vplz2eydX2UKGgGR0Byg38qFyq/aAdNgwFoCEdAk9KTe9Ba93V9lChoBkdAb7oTtb9qDmgHTYUBaAhHQJPULDKoybh1fZQoaAZHQGp4A13t8eFoB01vAWgIR0CT1Q2/i5uqdX2UKGgGR0BxG9fAsTWYaAdNRwFoCEdAk9VepOvdM3VlLg=="
 },
 "ep_success_buffer": {
 ":type:": "<class 'collections.deque'>",
 "_n_updates": 248,
 "observation_space": {
 ":type:": "<class 'gymnasium.spaces.box.Box'>",
+
":serialized:": "gAWVdgIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWCAAAAAAAAAABAQEBAQEBAZRoCIwCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksIhZSMAUOUdJRSlIwNYm91bmRlZF9hYm92ZZRoESiWCAAAAAAAAAABAQEBAQEBAZRoFUsIhZRoGXSUUpSMBl9zaGFwZZRLCIWUjANsb3eUaBEoliAAAAAAAAAAAAC0wgAAtMIAAKDAAACgwNsPScAAAKDAAAAAgAAAAICUaAtLCIWUaBl0lFKUjARoaWdolGgRKJYgAAAAAAAAAAAAtEIAALRCAACgQAAAoEDbD0lAAACgQAAAgD8AAIA/lGgLSwiFlGgZdJRSlIwIbG93X3JlcHKUjFtbLTkwLiAgICAgICAgLTkwLiAgICAgICAgIC01LiAgICAgICAgIC01LiAgICAgICAgIC0zLjE0MTU5MjcgIC01LgogIC0wLiAgICAgICAgIC0wLiAgICAgICBdlIwJaGlnaF9yZXBylIxTWzkwLiAgICAgICAgOTAuICAgICAgICAgNS4gICAgICAgICA1LiAgICAgICAgIDMuMTQxNTkyNyAgNS4KICAxLiAgICAgICAgIDEuICAgICAgIF2UjApfbnBfcmFuZG9tlE51Yi4=",
 "dtype": "float32",
 "bounded_below": "[ True True True True True True True True]",
 "bounded_above": "[ True True True True True True True True]",
 },
 "action_space": {
 ":type:": "<class 'gymnasium.spaces.discrete.Discrete'>",
+
":serialized:": "gAWV2wAAAAAAAACMGWd5bW5hc2l1bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYkMIBAAAAAAAAACUhpRSlIwFc3RhcnSUaAhoDkMIAAAAAAAAAACUhpRSlIwGX3NoYXBllCmMBWR0eXBllGgOjApfbnBfcmFuZG9tlE51Yi4=",
 "n": "4",
 "start": "0",
 "_shape": [],
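The `data` file above is the same pickled metadata as config.json, unpacked from the zip; after loading the checkpoint, these fields come back as attributes on the model. A small sketch, assuming the zip has already been downloaded locally (for example with `load_from_hub` as in the usage sketch):

```python
# Sketch: the fields diffed in ppo-LunarLander-v2/data are what PPO.load() restores.
# Assumes ppo-LunarLander-v2.zip is already present in the working directory.
from stable_baselines3 import PPO

model = PPO.load("ppo-LunarLander-v2.zip")

print(model.observation_space)   # 8-dimensional float32 Box, as stored above
print(model.action_space)        # Discrete(4)
print(model.n_steps, model.batch_size, model.n_epochs)   # 1024 64 4
print(model.gamma, model.gae_lambda, model.ent_coef)     # 0.999 0.98 0.01
print(model.num_timesteps)       # 1015808 = 16 envs * 1024 steps * 62 iterations
```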
ppo-LunarLander-v2/policy.optimizer.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:10df68dfbc2db698acf417de2ed29f513d986e97ebb1166d6b0ef1f82c659042
 size 88362
ppo-LunarLander-v2/policy.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:76c641aa10f449b66a5ea9df24f8a0ca27400bb41d04c2a10a77b7e17ebacd81
 size 43762
ppo-LunarLander-v2/system_info.txt
CHANGED
@@ -3,7 +3,7 @@
 - Stable-Baselines3: 2.0.0a5
 - PyTorch: 2.1.0+cu121
 - GPU Enabled: True
-- Numpy: 1.23.5
+- Numpy: 1.25.2
 - Cloudpickle: 2.2.1
 - Gymnasium: 0.28.1
 - OpenAI Gym: 0.25.2
replay.mp4
CHANGED
Binary files a/replay.mp4 and b/replay.mp4 differ
results.json
CHANGED
@@ -1 +1 @@
-{"
+{"mean_reward": 248.5929993005795, "std_reward": 18.37102069176698, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2024-03-05T02:28:34.300870"}
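The new results.json records the evaluation behind the card's mean_reward metric: 10 deterministic episodes, mean 248.59, std 18.37. A hedged sketch of how such a file is typically produced with SB3's `evaluate_policy` (exact scores depend on environment seeding, and the checkpoint is assumed to be available locally):

```python
# Sketch of an evaluation that writes a results.json like the one above.
# Assumes the checkpoint has been downloaded locally (see the usage sketch earlier).
import json
from datetime import datetime

import gymnasium as gym
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.monitor import Monitor

model = PPO.load("ppo-LunarLander-v2.zip")
eval_env = Monitor(gym.make("LunarLander-v2"))

mean_reward, std_reward = evaluate_policy(
    model, eval_env, n_eval_episodes=10, deterministic=True
)

with open("results.json", "w") as f:
    json.dump(
        {
            "mean_reward": mean_reward,
            "std_reward": std_reward,
            "is_deterministic": True,
            "n_eval_episodes": 10,
            "eval_datetime": datetime.now().isoformat(),
        },
        f,
    )
```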