Initial commit
- README.md +1 -1
- a2c-AntBulletEnv-v0.zip +1 -1
- a2c-AntBulletEnv-v0/data +17 -17
- a2c-AntBulletEnv-v0/policy.optimizer.pth +1 -1
- a2c-AntBulletEnv-v0/policy.pth +1 -1
- config.json +1 -1
- replay.mp4 +2 -2
- results.json +1 -1
- vec_normalize.pkl +1 -1
README.md
CHANGED
@@ -16,7 +16,7 @@ model-index:
      type: AntBulletEnv-v0
    metrics:
    - type: mean_reward
-      value:
+      value: 1338.40 +/- 40.49
      name: mean_reward
      verified: false
---
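The added `value` is the evaluation result from `results.json` (further down in this commit) rendered as `mean +/- std`, rounded to two decimals. A minimal sketch of that formatting, using the numbers committed below:

```python
# Render the evaluation result as the "mean +/- std" string used in the
# model-index metric above; the numbers come from results.json in this commit.
mean_reward = 1338.403076176066
std_reward = 40.492487322875796

metric_value = f"{mean_reward:.2f} +/- {std_reward:.2f}"
print(metric_value)  # 1338.40 +/- 40.49
```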
a2c-AntBulletEnv-v0.zip
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:40eae53b8fb6480bf192bf59f2d021440c94cd3e164b176143623150797781cb
size 129296
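This zip is the Stable-Baselines3 checkpoint, stored as a Git LFS pointer. A minimal sketch of fetching and loading it; the `repo_id` is a placeholder, not taken from this commit:

```python
from huggingface_hub import hf_hub_download
from stable_baselines3 import A2C

# Download the LFS-tracked checkpoint ("user/a2c-AntBulletEnv-v0" is a
# placeholder repo id) and load it back into an A2C model.
checkpoint_path = hf_hub_download(
    repo_id="user/a2c-AntBulletEnv-v0",
    filename="a2c-AntBulletEnv-v0.zip",
)
model = A2C.load(checkpoint_path)
```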
a2c-AntBulletEnv-v0/data
CHANGED
@@ -4,20 +4,20 @@
":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==",
"__module__": "stable_baselines3.common.policies",
"__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
-"__init__": "<function ActorCriticPolicy.__init__ at
-"_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at
-"reset_noise": "<function ActorCriticPolicy.reset_noise at
-"_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at
-"_build": "<function ActorCriticPolicy._build at
-"forward": "<function ActorCriticPolicy.forward at
-"extract_features": "<function ActorCriticPolicy.extract_features at
-"_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at
-"_predict": "<function ActorCriticPolicy._predict at
-"evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at
-"get_distribution": "<function ActorCriticPolicy.get_distribution at
-"predict_values": "<function ActorCriticPolicy.predict_values at
"__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc._abc_data object at
},
"verbose": 1,
"policy_kwargs": {
@@ -64,7 +64,7 @@
"_num_timesteps_at_start": 0,
"seed": null,
"action_noise": null,
-"start_time":
"learning_rate": 0.00096,
"tensorboard_log": null,
"lr_schedule": {
@@ -73,7 +73,7 @@
},
"_last_obs": {
":type:": "<class 'numpy.ndarray'>",
-":serialized:": "
},
"_last_episode_starts": {
":type:": "<class 'numpy.ndarray'>",
@@ -81,7 +81,7 @@
},
"_last_original_obs": {
":type:": "<class 'numpy.ndarray'>",
-":serialized:": "
},
"_episode_num": 0,
"use_sde": true,
@@ -89,7 +89,7 @@
"_current_progress_remaining": 0.0,
"ep_info_buffer": {
":type:": "<class 'collections.deque'>",
-":serialized:": "
},
"ep_success_buffer": {
":type:": "<class 'collections.deque'>",

":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==",
"__module__": "stable_baselines3.common.policies",
"__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
+"__init__": "<function ActorCriticPolicy.__init__ at 0x7fed0a5e70a0>",
+"_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7fed0a5e7130>",
+"reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7fed0a5e71c0>",
+"_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7fed0a5e7250>",
+"_build": "<function ActorCriticPolicy._build at 0x7fed0a5e72e0>",
+"forward": "<function ActorCriticPolicy.forward at 0x7fed0a5e7370>",
+"extract_features": "<function ActorCriticPolicy.extract_features at 0x7fed0a5e7400>",
+"_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7fed0a5e7490>",
+"_predict": "<function ActorCriticPolicy._predict at 0x7fed0a5e7520>",
+"evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7fed0a5e75b0>",
+"get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7fed0a5e7640>",
+"predict_values": "<function ActorCriticPolicy.predict_values at 0x7fed0a5e76d0>",
"__abstractmethods__": "frozenset()",
+"_abc_impl": "<_abc._abc_data object at 0x7fed0a5e9980>"
},
"verbose": 1,
"policy_kwargs": {

"_num_timesteps_at_start": 0,
"seed": null,
"action_noise": null,
+"start_time": 1685487574575203321,
"learning_rate": 0.00096,
"tensorboard_log": null,
"lr_schedule": {

},
"_last_obs": {
":type:": "<class 'numpy.ndarray'>",
+
":serialized:": "gAWVNQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJbAAQAAAAAAAKxJWr+xpyU/3SAMPzaEcL5lSLQ+OVSjPus4cDz1XAU9Ak/pvVo1tD5rAYS+hPghPjsk/76LC709xQoGPyD3xT0aM7c/VQdnvifMrz6qGhg93yRevUyYVL/2x4u+dgj7vk8EOz/30r4+yED/PjAhkL+5aba/8gAwPwNqCT9ohh+/oCdVvl29fz6kzr88IQ8zPgSo/b515hg/bfAMvz1oCT0vpzS/WKiQPtJQSD9yCtQ+AhlCP8oG5DxUvDM/qVMpvtmCvD0JNY0+3GUXvxrjxj1PBDs/99K+PshA/z4wIZC/0iiGv5uRyz5EnRU/ttNEPkpT6b7bFZQ+Fr2IPmHaUz5T9T8+ArGLPqH0nr5x2Wo9U+qvvvj+V75Mz8w+089AvmKVED+uE+e+S6UoP0jxtD37kmQ+qZqgPK81CL9yo4W+TwQ7P/fSvj7IQP8+MCGQvwNBTb/nPxs/+YUOP0YgpD5SWjU/tmYiP0j7jL6NlG++QMAHPhUHpz7UZ5u+QF+6PrxE3r5Q9fg9JHBYPy5O0r4gYm4/wyEAv41phD7xbPo+rs3avE1TOr8N8fa+01qLPU8EOz/30r4+yED/PjAhkL+UjAVudW1weZSMBWR0eXBllJOUjAJmNJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwRLHIaUjAFDlHSUUpQu"
},
"_last_episode_starts": {
":type:": "<class 'numpy.ndarray'>",

},
"_last_original_obs": {
":type:": "<class 'numpy.ndarray'>",
+
":serialized:": "gAWVNQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJbAAQAAAAAAAAAAAABArv01AACAPwAAAAAAAAAAAAAAAAAAAAAAAACAcJ5fvQAAAADbAPK/AAAAABEaD74AAAAAQHn6PwAAAACZa9Y8AAAAAALy5z8AAAAAOigQvgAAAAB909m/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAiCqBNgAAgD8AAAAAAAAAAAAAAAAAAAAAAAAAgAiSor0AAAAAJMX3vwAAAADvZMw9AAAAALge9z8AAAAAS9e+PQAAAABdhPk/AAAAADxT2D0AAAAASnfovwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMaATDYAAIA/AAAAAAAAAAAAAAAAAAAAAAAAAIDMNRI+AAAAAL6A5r8AAAAAvYfsPQAAAADr5v0/AAAAAH2hmT0AAAAAwtD5PwAAAAADm5m9AAAAAGmV5b8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADVgrQ0AACAPwAAAAAAAAAAAAAAAAAAAAAAAACA29iLvQAAAAB9pdm/AAAAADYgu7wAAAAApmbqPwAAAAAgHzu8AAAAAGlq9D8AAAAAZSm2PAAAAABw8eK/AAAAAAAAAAAAAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJmNJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwRLHIaUjAFDlHSUUpQu"
},
"_episode_num": 0,
"use_sde": true,

"_current_progress_remaining": 0.0,
"ep_info_buffer": {
":type:": "<class 'collections.deque'>",
+
":serialized:": "gAWVRAwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQJKD0Fjd56eMAWyUTegDjAF0lEdApLADcZccEXV9lChoBkdAk8eb9l2/z2gHTegDaAhHQKSyZfWtlqd1fZQoaAZHQJOvuL61stVoB03oA2gIR0Cksse+mFajdX2UKGgGR0CJ1LlYEGJOaAdN6ANoCEdApLNl7Y02tXV9lChoBkdAlkn4smOU+2gHTegDaAhHQKS/4o0hvBJ1fZQoaAZHQJcFqYrrgO1oB03oA2gIR0CkwjIDPnjidX2UKGgGR0CUGEtALRa5aAdN6ANoCEdApMKcIVuaW3V9lChoBkdAk3WIBaLXMGgHTegDaAhHQKTDPJwsGxF1fZQoaAZHQJblhzfaYeFoB03oA2gIR0Ckz8HUDuBudX2UKGgGR0CTcMhm5DqoaAdN6ANoCEdApNIu7tiQT3V9lChoBkdAltB3EdeY2WgHTegDaAhHQKTSkgvDgqF1fZQoaAZHQIud3IS13MZoB03oA2gIR0Ck0zJXyRSxdX2UKGgGR0CXV+5jH4oJaAdN6ANoCEdApN9+pOvdM3V9lChoBkdAlf4FtO2y9mgHTegDaAhHQKThyxiXpnp1fZQoaAZHQItHbMcIZ65oB03oA2gIR0Ck4iQ97ngYdX2UKGgGR0CWV/8pkPMCaAdN6ANoCEdApOK+FlCkXXV9lChoBkdAl2XnGOuJUGgHTegDaAhHQKTv26PKdQR1fZQoaAZHQJUHAk6cRUZoB03oA2gIR0Ck8lNHpbD/dX2UKGgGR0CXRa51eSjhaAdN6ANoCEdApPK7A57w8XV9lChoBkdAlBrpxeb/fmgHTegDaAhHQKTzYHbAUL51fZQoaAZHQJTZ+fywwCdoB03oA2gIR0ClALVJUYKqdX2UKGgGR0CUx9Ti83+/aAdN6ANoCEdApQMy7TUiIXV9lChoBkdAli9783uNP2gHTegDaAhHQKUDrUPxx1h1fZQoaAZHQJTBoLYwqRVoB03oA2gIR0ClBEqw6hg3dX2UKGgGR0CQ2jysjmjkaAdN6ANoCEdApREcRUWEb3V9lChoBkdAk+pLVFx4p2gHTegDaAhHQKUTf6VMVUN1fZQoaAZHQJfI84Qz1sdoB03oA2gIR0ClE+tLL6k7dX2UKGgGR0CRTLkE9t/GaAdN6ANoCEdApRSHnKW9lHV9lChoBkdAlMzGucMEzWgHTegDaAhHQKUhlIo3Jgd1fZQoaAZHQJbP5UlzEJloB03oA2gIR0ClI/yteUpvdX2UKGgGR0CWE0RdyDIzaAdN6ANoCEdApSR1jG1hLHV9lChoBkdAi1TmGVRk3GgHTegDaAhHQKUlJDQ7cO91fZQoaAZHQJgTMK3NLUVoB03oA2gIR0ClMeBtDUmVdX2UKGgGR0CWToa7VawEaAdN6ANoCEdApTRLXrdFfHV9lChoBkdAlqTEmplz2mgHTegDaAhHQKU0rdzGPxR1fZQoaAZHQJhKQ61b7j1oB03oA2gIR0ClNUKyfL9udX2UKGgGR0CW1xw7kn1GaAdN6ANoCEdApUIG65Gz8nV9lChoBkdAlqmawQlKLGgHTegDaAhHQKVEVxc3VCp1fZQoaAZHQJKbFFI/Z/VoB03oA2gIR0ClRL9X9zfadX2UKGgGR0CXmQOHWSU1aAdN6ANoCEdApUVYoVmBfHV9lChoBkdAkej/hIe5nWgHTegDaAhHQKVSFeSjgyd1fZQoaAZHQIsHiKm8/UxoB03oA2gIR0ClVHdWIXTFdX2UKGgGR0CXCN++/QBxaAdN6ANoCEdApVTnT/hl2HV9lChoBkdAlSS79VFQVWgHTegDaAhHQKVVh0L+glF1fZQoaAZHQJGik8uBczJoB03oA2gIR0ClYpgFPi1idX2UKGgGR0CTtaQ3xWkraAdN6ANoCEdApWUev4dp7HV9lChoBkdAkuGqK+BYm2gHTegDaAhHQKVlgc6vJRx1fZQoaAZHQJFoCiSJTERoB03oA2gIR0ClZhgLqlgudX2UKGgGR0CUqgNdqtYCaAdN6ANoCEdApXMhAt4A0nV9lChoBkdAfXwOoo/iYWgHTegDaAhHQKV1dzqbBoF1fZQoaAZHQJJPlQsPJ7toB03oA2gIR0CldeE2P1cudX2UKGgGR0CVSRpXp4bCaAdN6ANoCEdApXZ+yX2M9HV9lChoBkdAiww4msvIwWgHTegDaAhHQKWDFkvsZ511fZQoaAZHQJTByFyq+8JoB03oA2gIR0ClhXG2kSEldX2UKGgGR0CVsTqyGBWgaAdN6ANoCEdApYXcVclgMXV9lChoBkdAlcHrLdN34mgHTegDaAhHQKWGeO0b9611fZQoaAZHQJNfDYRNATtoB03oA2gIR0Clkyg75mAcdX2UKGgGR0CWfKVy3kPuaAdN6ANoCEdApZWOrp7kXHV9lChoBkdAjXBUF0PpZGgHTegDaAhHQKWV7Q8fV7R1fZQoaAZHQJOfFRqGlANoB03oA2gIR0Cllo+LehwmdX2UKGgGR0CTH7R4hUzbaAdN6ANoCEdApaMzIaLn93V9lChoBkdAkrpkCV8kU2gHTegDaAhHQKWliyquKXR1fZQoaAZHQJHIgb1h9b5oB03oA2gIR0ClpgA9Net0dX2UKGgGR0CQUBkXUH6eaAdN6ANoCEdApaanBvaURnV9lChoBkdAlVBGs3hn8WgHTegDaAhHQKWz8RBeHBV1fZQoaAZHQJN06UOd5IJoB03oA2gIR0Cltjr2xptadX2UKGgGR0CWMUAMlTm5aAdN6ANoCEdApbahPfsNUnV9lChoBkdAlRy5JoTPB2gHTegDaAhHQKW3TAs052h1fZQoaAZHQJaJUYfnwG5oB03oA2gIR0Clw+Bz3h4udX2UKGgGR0CTOWJf6XSjaAdN6ANoCEdApcY7sOXmeXV9lChoBkdAlbn+nl4keWgHTegDaAhHQKXGmtuDSPV1fZQoaAZHQJQHCgf2bodoB03oA2gIR0ClxzpItlI3dX2UKGgGR0CU8F3nIQvpaAdN6ANoCEdApdPF6C17Y3V9lChoBkdAk5/bHMlkY2gHTegDaAhHQKXWLst03fh1fZQoaAZHQJVKBWbPQfJoB03oA2gIR0Cl1pugQHzIdX2UKGgGR0CTlvtrbg0kaAdN6ANoCEdApdc+Dxsl9nV9lChoBkdAlQsqiCaqj2gHTegDaAhHQKXj298JD3N1fZQoaAZHQJXWiTbFjutoB03oA2gIR0Cl5jUtyxRmdX2UKGgGR0CWXSjiGWUsaAdN6ANoCEdApealKf4AS3V9lChoBkdAlM0Tf3vhImgHTegDaAhHQKXnVfLs8gZ1fZQoaAZHQJXqQS9M9KVoB03oA2gIR0Cl89iYb83udX2UKGgGR0CVCDqrR0EHaAdN6ANoCEdApfYklTm4iHV9lChoBkdAleG1QqI8AGgHTegDaAhHQKX2kOYIBzV1fZQoaAZHQJWjLMmnfl9oB03oA2gIR0Cl9yxJ/XoUdX
2UKGgGR0CWTBoWYWtVaAdN6ANoCEdApgOwZuQ6qHV9lChoBkdAlauz7l7tzGgHTegDaAhHQKYGAkv9LpR1fZQoaAZHQJdDpp35eqtoB03oA2gIR0CmBmWZy+6AdX2UKGgGR0CWZimF8G9paAdN6ANoCEdApgcF36hxpHV9lChoBkdAlnZWkzoECGgHTegDaAhHQKYTcBDohZB1fZQoaAZHQJTU7IKc/dJoB03oA2gIR0CmFb0IcBEKdX2UKGgGR0CTW//Ot4iYaAdN6ANoCEdAphYs6FM7EHV9lChoBkdAlaOURFqi5GgHTegDaAhHQKYW1LdvbXZ1fZQoaAZHQJNyvEfkmyBoB03oA2gIR0CmI4XQdCE6dX2UKGgGR0CRm/hAnlXBaAdN6ANoCEdApiXWC/XXiHV9lChoBkdAk5GQAAAAAGgHTegDaAhHQKYmOjvd/KB1fZQoaAZHQJI3wOBlMAZoB03oA2gIR0CmJs+lsP8RdX2UKGgGR0CSeSG4I8hcaAdN6ANoCEdApjOJceKba3V9lChoBkdAlDNzjFQ2uWgHTegDaAhHQKY10IMSbph1fZQoaAZHQJUM4q6OHWVoB03oA2gIR0CmNjW+GoJidX2UKGgGR0CSSd9VFQVLaAdN6ANoCEdApjbUT37DVHVlLg=="
},
"ep_success_buffer": {
":type:": "<class 'collections.deque'>",
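The `data` file shown above is the JSON record inside the checkpoint zip holding the algorithm's constructor arguments and training state; the lines that changed are the in-memory function addresses, `start_time`, and the serialized buffers. A rough sketch of checking that a loaded model reflects these entries, using standard SB3 attributes rather than anything specific to this commit:

```python
from stable_baselines3 import A2C

# SB3 rebuilds the model from this "data" JSON plus policy.pth and
# policy.optimizer.pth when loading the checkpoint zip.
model = A2C.load("a2c-AntBulletEnv-v0.zip")

print(model.learning_rate)           # 0.00096, as recorded above
print(model.use_sde)                 # True (gSDE exploration)
print(model.num_timesteps)           # 2000000
print(type(model.policy).__name__)   # ActorCriticPolicy
```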
a2c-AntBulletEnv-v0/policy.optimizer.pth
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f2976279656f9165ad0ef53a8959773462a0dd9cf55fa3b89ff6addf39932bf6
size 56190
a2c-AntBulletEnv-v0/policy.pth
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:2fbcb2c23a3029c46b5f777d44e1c3acca4b45d7c59109f66bff420293ac6173
size 56894
config.json
CHANGED
@@ -1 +1 @@
-
{"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==", "__module__": "stable_baselines3.common.policies", "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function ActorCriticPolicy.__init__ at 0x7f3cbc320790>", "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7f3cbc320820>", "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7f3cbc3208b0>", "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7f3cbc320940>", "_build": "<function ActorCriticPolicy._build at 0x7f3cbc3209d0>", "forward": "<function ActorCriticPolicy.forward at 0x7f3cbc320a60>", "extract_features": "<function ActorCriticPolicy.extract_features at 0x7f3cbc320af0>", "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7f3cbc320b80>", "_predict": "<function ActorCriticPolicy._predict at 0x7f3cbc320c10>", "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7f3cbc320ca0>", "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7f3cbc320d30>", "predict_values": "<function ActorCriticPolicy.predict_values at 0x7f3cbc320dc0>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x7f3cbc50b200>"}, "verbose": 1, "policy_kwargs": {":type:": "<class 'dict'>", ":serialized:": "gAWVowAAAAAAAAB9lCiMDGxvZ19zdGRfaW5pdJRK/v///4wKb3J0aG9faW5pdJSJjA9vcHRpbWl6ZXJfY2xhc3OUjBN0b3JjaC5vcHRpbS5ybXNwcm9wlIwHUk1TcHJvcJSTlIwQb3B0aW1pemVyX2t3YXJnc5R9lCiMBWFscGhhlEc/764UeuFHrowDZXBzlEc+5Pi1iONo8YwMd2VpZ2h0X2RlY2F5lEsAdXUu", "log_std_init": -2, "ortho_init": false, "optimizer_class": "<class 'torch.optim.rmsprop.RMSprop'>", "optimizer_kwargs": {"alpha": 0.99, "eps": 1e-05, "weight_decay": 0}}, "observation_space": {":type:": "<class 'gym.spaces.box.Box'>", 
":serialized:": "gAWVbQIAAAAAAACMDmd5bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLHIWUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWcAAAAAAAAAAAAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/lGgLSxyFlIwBQ5R0lFKUjARoaWdolGgTKJZwAAAAAAAAAAAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH+UaAtLHIWUaBZ0lFKUjA1ib3VuZGVkX2JlbG93lGgTKJYcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUaAiMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLHIWUaBZ0lFKUjA1ib3VuZGVkX2Fib3ZllGgTKJYcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUaCJLHIWUaBZ0lFKUjApfbnBfcmFuZG9tlE51Yi4=", "dtype": "float32", "_shape": [28], "low": "[-inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf\n -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf]", "high": "[inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf\n inf inf inf inf inf inf inf inf inf inf]", "bounded_below": "[False False False False False False False False False False False False\n False False False False False False False False False False False False\n False False False False]", "bounded_above": "[False False False False False False False False False False False False\n False False False False False False False False False False False False\n False False False False]", "_np_random": null}, "action_space": {":type:": "<class 'gym.spaces.box.Box'>", ":serialized:": "gAWVpQEAAAAAAACMDmd5bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLCIWUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWIAAAAAAAAAAAAIC/AACAvwAAgL8AAIC/AACAvwAAgL8AAIC/AACAv5RoC0sIhZSMAUOUdJRSlIwEaGlnaJRoEyiWIAAAAAAAAAAAAIA/AACAPwAAgD8AAIA/AACAPwAAgD8AAIA/AACAP5RoC0sIhZRoFnSUUpSMDWJvdW5kZWRfYmVsb3eUaBMolggAAAAAAAAAAQEBAQEBAQGUaAiMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLCIWUaBZ0lFKUjA1ib3VuZGVkX2Fib3ZllGgTKJYIAAAAAAAAAAEBAQEBAQEBlGgiSwiFlGgWdJRSlIwKX25wX3JhbmRvbZROdWIu", "dtype": "float32", "_shape": [8], "low": "[-1. -1. -1. -1. -1. -1. -1. -1.]", "high": "[1. 1. 1. 1. 1. 1. 1. 
1.]", "bounded_below": "[ True True True True True True True True]", "bounded_above": "[ True True True True True True True True]", "_np_random": null}, "n_envs": 4, "num_timesteps": 2000000, "_total_timesteps": 2000000, "_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1683677836311692961, "learning_rate": 0.00096, "tensorboard_log": null, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWV6wIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMXC9ob21lL2hhaWRlci9taW5jb25kYTMvdWJ1bnR1L2xpYi9weXRob24zLjEwL3NpdGUtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lIwEZnVuY5RLgkMCBAGUjAN2YWyUhZQpdJRSlH2UKIwLX19wYWNrYWdlX1+UjBhzdGFibGVfYmFzZWxpbmVzMy5jb21tb26UjAhfX25hbWVfX5SMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMCF9fZmlsZV9flIxcL2hvbWUvaGFpZGVyL21pbmNvbmRhMy91YnVudHUvbGliL3B5dGhvbjMuMTAvc2l0ZS1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUdU5OaACMEF9tYWtlX2VtcHR5X2NlbGyUk5QpUpSFlHSUUpSMHGNsb3VkcGlja2xlLmNsb3VkcGlja2xlX2Zhc3SUjBJfZnVuY3Rpb25fc2V0c3RhdGWUk5RoH32UfZQoaBZoDYwMX19xdWFsbmFtZV9flIwZY29uc3RhbnRfZm4uPGxvY2Fscz4uZnVuY5SMD19fYW5ub3RhdGlvbnNfX5R9lIwOX19rd2RlZmF1bHRzX1+UTowMX19kZWZhdWx0c19flE6MCl9fbW9kdWxlX1+UaBeMB19fZG9jX1+UTowLX19jbG9zdXJlX1+UaACMCl9tYWtlX2NlbGyUk5RHP091EE1VHWmFlFKUhZSMF19jbG91ZHBpY2tsZV9zdWJtb2R1bGVzlF2UjAtfX2dsb2JhbHNfX5R9lHWGlIZSMC4="}, "_last_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVNQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJbAAQAAAAAAAGw2jb0ktO6/G0sAv3EyNj/3NNk/ONiAvxnoNT+U8oS9//Gwv2IyCT8AE8U+4bWMv+OR1L+4zeq/mVTqvi43Tr9Kc4O/5nJdP9PHmj4byOu9IDxuvwrQjj/ZCAq/Lay5v/QivD4Z75g+7jyKPuzDnz4nlFe9cFjMv0+uO74aKlq9Sf+KPwQaxD3FTIs+Vp0HPm99rL0pQnM/77ktPjX5n72f8YE/Iu+svDSr3b0meAk9IjAmvxdkzj1uDyk9SQlzOseSnj/4i+i+Q9xavtG6gb30Irw+Ge+YPu48ij7sw58++ucdQPZ13r7Ln+o+LbKLQDBiQD5aZRTAbLVrvxwLjr/vYHc/2yjNPp60aECYyzhAN/0cP2W51D9PjEu/Cg+pvwQgQT5jqt2/qJcWwBM+0z/Yt2M/yo2wv2csMb98iu849CK8PitDVsDuPIo+zxlNwASmxD87nk2/5sO5Pkm0rL7CB6G9dvjpPWqsyj9+eqK+0D+IPy2uWLyhamVAtIVVvG7vm74x1GO/YSS7v8CGAr+znIi/D1DHPCRNMr8Wg4I9vZ6zvqtworydyRE9CmktvfQivD4rQ1bA7jyKPuzDnz6UjAVudW1weZSMBWR0eXBllJOUjAJmNJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwRLHIaUjAFDlHSUUpQu"}, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVdwAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYEAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSwSFlIwBQ5R0lFKULg=="}, "_last_original_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVNQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJbAAQAAAAAAAAAAAAA3q0s2AACAPwAAAAAAAAAAAAAAAAAAAAAAAACAnipHPQAAAAC4x/a/AAAAAP3uJz0AAAAALSTmPwAAAACD2AY+AAAAALZK4D8AAAAAMOdKvAAAAACF4d+/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADDkKNgAAgD8AAAAAAAAAAAAAAAAAAAAAAAAAgBUM6rwAAAAASfjfvwAAAACOyKo8AAAAAGDa9T8AAAAAymgAPgAAAAB7EvU/AAAAAGQ6oz0AAAAAc53ovwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANUggTYAAIA/AAAAAAAAAAAAAAAAAAAAAAAAAIAuuuY9AAAAAM1C878AAAAAYAnPPQAAAABYvts/AAAAAGNf4T0AAAAAYVXoPwAAAAAUpoG9AAAAAAgD8r8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACZWFQ2AACAPwAAAAAAAAAAAAAAAAAAAAAAAACA1HW8vQAAAAD4yf+/AAAAAL6GkbsAAAAAee3/PwAAAAC6dgU+AAAAANM59D8AAAAA8muiPQAAAAA1/eq/AAAAAAAAAAAAAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJmNJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwRLHIaUjAFDlHSUUpQu"}, "_episode_num": 0, "use_sde": true, "sde_sample_freq": -1, "_current_progress_remaining": 0.0, "ep_info_buffer": {":type:": "<class 
'collections.deque'>", ":serialized:": "gAWVRAwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQIc17HlwLmaMAWyUTegDjAF0lEdAptOHp4bCJ3V9lChoBkdAhxG9/SYw7GgHTegDaAhHQKbU3a3Zwn91fZQoaAZHQIblesvIwM9oB03oA2gIR0Cm110QbuMNdX2UKGgGR0CHDajUutfYaAdN6ANoCEdAptiu9lEqlXV9lChoBkdAh76nXd0q6WgHTegDaAhHQKbfgPYnOSp1fZQoaAZHQIjUqpxWDHxoB03oA2gIR0Cm4MnKGL1mdX2UKGgGR0CJhTUBGQS0aAdN6ANoCEdApuNQ/Z/Tb3V9lChoBkdAiaT5iuuA7WgHTegDaAhHQKbkfKUVzp51fZQoaAZHQIh2G0zCUHJoB03oA2gIR0Cm628neBQOdX2UKGgGR0CKdBPNVzZIaAdN6ANoCEdApuyy90zTF3V9lChoBkdAi+KsY2sJY2gHTegDaAhHQKbvH9LHuJF1fZQoaAZHQIwzB+BpYcNoB03oA2gIR0Cm8F8mBvrGdX2UKGgGR0CLfelLOAy3aAdN6ANoCEdApvdeYjSofnV9lChoBkdAh5npfpljE2gHTegDaAhHQKb4su+yquN1fZQoaAZHQIpMG7FsHjZoB03oA2gIR0Cm+zT2exwAdX2UKGgGR0CMgxkRSP2gaAdN6ANoCEdApvyMhFEy+HV9lChoBkdAihqNknTiKmgHTegDaAhHQKcDoml67d11fZQoaAZHQIqx+NFSbYtoB03oA2gIR0CnBNyS/0uldX2UKGgGR0CKTuwPAfuDaAdN6ANoCEdApwdSWiUPhHV9lChoBkdAiaQ94eLeh2gHTegDaAhHQKcImHYYixF1fZQoaAZHQIHOmuFHrhRoB03oA2gIR0CnD2pJXhfjdX2UKGgGR0CJkFXg9/z8aAdN6ANoCEdApxClX3g1nHV9lChoBkdAi4CMtTUAk2gHTegDaAhHQKcTE/yoXKt1fZQoaAZHQIxZNTxXnyNoB03oA2gIR0CnFGk5hjOLdX2UKGgGR0CIYcFwDNhWaAdN6ANoCEdApxtCeqaPS3V9lChoBkdAiwTf9gnc+WgHTegDaAhHQKcceeU6gdx1fZQoaAZHQIrtm5jH4oJoB03oA2gIR0CnHwHs9jgAdX2UKGgGR0CIlNqUNayKaAdN6ANoCEdApyBOMju8b3V9lChoBkdAirtgxagVXWgHTegDaAhHQKcnMkv9LpR1fZQoaAZHQIgYKeK8+RpoB03oA2gIR0CnKIRDTjNqdX2UKGgGR0CKUZFqBVdYaAdN6ANoCEdApyr1dszl93V9lChoBkdAjEa3q7iAD2gHTegDaAhHQKcsNIjGDL91fZQoaAZHQIuOG1Bt1p1oB03oA2gIR0CnMvghB7eEdX2UKGgGR0CI7zjBl+VkaAdN6ANoCEdApzQywIMSb3V9lChoBkdAigMm6XjU/mgHTegDaAhHQKc2mO/+Kj11fZQoaAZHQIrDmf29L6FoB03oA2gIR0CnN/mFSKm9dX2UKGgGR0CGWr7VrhzeaAdN6ANoCEdApz8VeY2KmHV9lChoBkdAiXgg7YChe2gHTegDaAhHQKdAVy8SPEN1fZQoaAZHQIjCuJm/WUdoB03oA2gIR0CnQtRDkU9IdX2UKGgGR0CLqFTzd1uBaAdN6ANoCEdAp0QeNvOyFHV9lChoBkdAi5PJ7b+LnGgHTegDaAhHQKdK/QE6kqN1fZQoaAZHQInNBl18stloB03oA2gIR0CnTEKR2bG4dX2UKGgGR0CKv41FYuCgaAdN6ANoCEdAp07L5RCQcXV9lChoBkdAjItOTaCcw2gHTegDaAhHQKdQBq8Djip1fZQoaAZHQIyPE0HhS+BoB03oA2gIR0CnVtQqAjIJdX2UKGgGR0CLqHhQWN3oaAdN6ANoCEdAp1gPai9Iw3V9lChoBkdAi8WGiHqNZWgHTegDaAhHQKdafA8B+4N1fZQoaAZHQIvuUYqG1x9oB03oA2gIR0CnW7mwiaAndX2UKGgGR0CMU0uTzND/aAdN6ANoCEdAp2KAlt0mt3V9lChoBkdAh1O4QBgeBGgHTegDaAhHQKdjw6jFhod1fZQoaAZHQIoB8MspXp5oB03oA2gIR0CnZkoqTbFkdX2UKGgGR0CIqdc7hegMaAdN6ANoCEdAp2d/CO3lS3V9lChoBkdAiXG+7L+xW2gHTegDaAhHQKduoVyFPBV1fZQoaAZHQId+nQY1pCdoB03oA2gIR0Cnb+aUJOWTdX2UKGgGR0CNbCaef7JoaAdN6ANoCEdAp3JRJ7LMcXV9lChoBkdAjSiI3R5TqGgHTegDaAhHQKdzndcB2fV1fZQoaAZHQIrkrOTq0MRoB03oA2gIR0CnepYYR/VidX2UKGgGR0CJYwDhcZ+AaAdN6ANoCEdAp3vBsj3VTnV9lChoBkdAieH+pXIU8GgHTegDaAhHQKd+LYYixFB1fZQoaAZHQIi+ehXbM5hoB03oA2gIR0Cnf3EjgQ6IdX2UKGgGR0CJZAXzlLezaAdN6ANoCEdAp4Y1mUW2w3V9lChoBkdAiRx+2uxKQWgHTegDaAhHQKeHaxoIv8J1fZQoaAZHQIn2PfEXLvFoB03oA2gIR0CnidnPNVzZdX2UKGgGR0CMSPio86mwaAdN6ANoCEdAp4sU4Nqgy3V9lChoBkdAjFnC7K7qZGgHTegDaAhHQKeSBBPbfxd1fZQoaAZHQIoCQ4CIUJxoB03oA2gIR0Cnk07DEWIodX2UKGgGR0CHPmFGG21EaAdN6ANoCEdAp5XCfHxSYXV9lChoBkdAit+9ORDCxmgHTegDaAhHQKeXD4k/r0J1fZQoaAZHQImrREMLF4toB03oA2gIR0CnnhVy3kPudX2UKGgGR0CK0Zq0MPSVaAdN6ANoCEdAp59sxZdOZnV9lChoBkdAiij1G0/nn2gHTegDaAhHQKeh4qHXVb11fZQoaAZHQIi5h1q33HtoB03oA2gIR0Cnoyw4jrzHdX2UKGgGR0CL8wdNnGsFaAdN6ANoCEdAp6n4Qg9vCXV9lChoBkdAiVr1YZEUkGgHTegDaAhHQKerOzlcQiB1fZQoaAZHQIz/hyZKFqVoB03oA2gIR0CnraH+qBEsdX2UKGgGR0CMAkyxA0KraAdN6ANoCEdAp67qmTC+DnV9lChoBkdAhP6GGmDUVmgHTegDaAhHQKe1xAsTWXl1fZQoaAZHQI0tMadc0LtoB03oA2gIR0Cntv/vOQhfdX2UKGgGR0CMsVEofCAMaAdN6ANoCEdAp7mEyULUkXV9lChoBkdAjA5+BH09Q2gHTegDaAhHQKe6ypGWldl1fZQoaAZHQItIIcT8HfNoB03oA2gIR0CnwhNgSeyzdX2UKGgGR0CKQtRE4NqhaAdN6ANoCEdAp8OL6SDAanV9lChoBkdAithisGPgemgHTegDaAhHQKfGX8VpKz11fZQoaAZHQIx/Qkqtozx
oB03oA2gIR0Cnx7EaMrEtdX2UKGgGR0CMienv2GqQaAdN6ANoCEdAp86gYP5HmXV9lChoBkdAhbTEB0ZFX2gHTegDaAhHQKfP37TlT3t1fZQoaAZHQIylnP1L8JloB03oA2gIR0Cn0lPBJqZddX2UKGgGR0CL6H39JjDsaAdN6ANoCEdAp9OdqSHM2XV9lChoBkdAhrUomw7kn2gHTegDaAhHQKfaje8f3ex1fZQoaAZHQIrigrWiDdxoB03oA2gIR0Cn2+lwcYIjdX2UKGgGR0CJGTH3lCC0aAdN6ANoCEdAp95jSPU8WHV9lChoBkdAh/f9fkWAPWgHTegDaAhHQKffuY6XBxh1fZQoaAZHQIg6taKUFB9oB03oA2gIR0Cn5tPuogmrdX2UKGgGR0CKEN6Hj6vaaAdN6ANoCEdAp+gThm5DqnV9lChoBkdAiVyvf0mMO2gHTegDaAhHQKfqi7Ciypt1fZQoaAZHQIozAR9PUKBoB03oA2gIR0Cn6+Gza9K3dX2UKGgGR0CMbLLGJemfaAdN6ANoCEdAp/LlmcvugHV9lChoBkdAjVXLgn+hoWgHTegDaAhHQKf0GjbBXS11fZQoaAZHQIqHy+36Q/5oB03oA2gIR0Cn9n8kMTewdX2UKGgGR0CNV/UIcBEKaAdN6ANoCEdAp/fG4kNWl3VlLg=="}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 62500, "n_steps": 8, "gamma": 0.99, "gae_lambda": 0.9, "ent_coef": 0.0, "vf_coef": 0.4, "max_grad_norm": 0.5, "normalize_advantage": false, "system_info": {"OS": "Linux-5.10.16.3-microsoft-standard-WSL2-x86_64-with-glibc2.35 # 1 SMP Fri Apr 2 22:23:49 UTC 2021", "Python": "3.10.9", "Stable-Baselines3": "1.8.0a2", "PyTorch": "1.13.1+cu117", "GPU Enabled": "True", "Numpy": "1.23.5", "Gym": "0.21.0"}}
+
{"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==", "__module__": "stable_baselines3.common.policies", "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function ActorCriticPolicy.__init__ at 0x7fed0a5e70a0>", "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7fed0a5e7130>", "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7fed0a5e71c0>", "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7fed0a5e7250>", "_build": "<function ActorCriticPolicy._build at 0x7fed0a5e72e0>", "forward": "<function ActorCriticPolicy.forward at 0x7fed0a5e7370>", "extract_features": "<function ActorCriticPolicy.extract_features at 0x7fed0a5e7400>", "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7fed0a5e7490>", "_predict": "<function ActorCriticPolicy._predict at 0x7fed0a5e7520>", "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7fed0a5e75b0>", "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7fed0a5e7640>", "predict_values": "<function ActorCriticPolicy.predict_values at 0x7fed0a5e76d0>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x7fed0a5e9980>"}, "verbose": 1, "policy_kwargs": {":type:": "<class 'dict'>", ":serialized:": "gAWVowAAAAAAAAB9lCiMDGxvZ19zdGRfaW5pdJRK/v///4wKb3J0aG9faW5pdJSJjA9vcHRpbWl6ZXJfY2xhc3OUjBN0b3JjaC5vcHRpbS5ybXNwcm9wlIwHUk1TcHJvcJSTlIwQb3B0aW1pemVyX2t3YXJnc5R9lCiMBWFscGhhlEc/764UeuFHrowDZXBzlEc+5Pi1iONo8YwMd2VpZ2h0X2RlY2F5lEsAdXUu", "log_std_init": -2, "ortho_init": false, "optimizer_class": "<class 'torch.optim.rmsprop.RMSprop'>", "optimizer_kwargs": {"alpha": 0.99, "eps": 1e-05, "weight_decay": 0}}, "observation_space": {":type:": "<class 'gym.spaces.box.Box'>", 
":serialized:": "gAWVbQIAAAAAAACMDmd5bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLHIWUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWcAAAAAAAAAAAAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/lGgLSxyFlIwBQ5R0lFKUjARoaWdolGgTKJZwAAAAAAAAAAAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH+UaAtLHIWUaBZ0lFKUjA1ib3VuZGVkX2JlbG93lGgTKJYcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUaAiMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLHIWUaBZ0lFKUjA1ib3VuZGVkX2Fib3ZllGgTKJYcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUaCJLHIWUaBZ0lFKUjApfbnBfcmFuZG9tlE51Yi4=", "dtype": "float32", "_shape": [28], "low": "[-inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf\n -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf]", "high": "[inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf\n inf inf inf inf inf inf inf inf inf inf]", "bounded_below": "[False False False False False False False False False False False False\n False False False False False False False False False False False False\n False False False False]", "bounded_above": "[False False False False False False False False False False False False\n False False False False False False False False False False False False\n False False False False]", "_np_random": null}, "action_space": {":type:": "<class 'gym.spaces.box.Box'>", ":serialized:": "gAWVpQEAAAAAAACMDmd5bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLCIWUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWIAAAAAAAAAAAAIC/AACAvwAAgL8AAIC/AACAvwAAgL8AAIC/AACAv5RoC0sIhZSMAUOUdJRSlIwEaGlnaJRoEyiWIAAAAAAAAAAAAIA/AACAPwAAgD8AAIA/AACAPwAAgD8AAIA/AACAP5RoC0sIhZRoFnSUUpSMDWJvdW5kZWRfYmVsb3eUaBMolggAAAAAAAAAAQEBAQEBAQGUaAiMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLCIWUaBZ0lFKUjA1ib3VuZGVkX2Fib3ZllGgTKJYIAAAAAAAAAAEBAQEBAQEBlGgiSwiFlGgWdJRSlIwKX25wX3JhbmRvbZROdWIu", "dtype": "float32", "_shape": [8], "low": "[-1. -1. -1. -1. -1. -1. -1. -1.]", "high": "[1. 1. 1. 1. 1. 1. 1. 
1.]", "bounded_below": "[ True True True True True True True True]", "bounded_above": "[ True True True True True True True True]", "_np_random": null}, "n_envs": 4, "num_timesteps": 2000000, "_total_timesteps": 2000000, "_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1685487574575203321, "learning_rate": 0.00096, "tensorboard_log": null, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWV6wIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMXC9ob21lL2hhaWRlci9taW5jb25kYTMvdWJ1bnR1L2xpYi9weXRob24zLjEwL3NpdGUtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lIwEZnVuY5RLgkMCBAGUjAN2YWyUhZQpdJRSlH2UKIwLX19wYWNrYWdlX1+UjBhzdGFibGVfYmFzZWxpbmVzMy5jb21tb26UjAhfX25hbWVfX5SMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMCF9fZmlsZV9flIxcL2hvbWUvaGFpZGVyL21pbmNvbmRhMy91YnVudHUvbGliL3B5dGhvbjMuMTAvc2l0ZS1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUdU5OaACMEF9tYWtlX2VtcHR5X2NlbGyUk5QpUpSFlHSUUpSMHGNsb3VkcGlja2xlLmNsb3VkcGlja2xlX2Zhc3SUjBJfZnVuY3Rpb25fc2V0c3RhdGWUk5RoH32UfZQoaBZoDYwMX19xdWFsbmFtZV9flIwZY29uc3RhbnRfZm4uPGxvY2Fscz4uZnVuY5SMD19fYW5ub3RhdGlvbnNfX5R9lIwOX19rd2RlZmF1bHRzX1+UTowMX19kZWZhdWx0c19flE6MCl9fbW9kdWxlX1+UaBeMB19fZG9jX1+UTowLX19jbG9zdXJlX1+UaACMCl9tYWtlX2NlbGyUk5RHP091EE1VHWmFlFKUhZSMF19jbG91ZHBpY2tsZV9zdWJtb2R1bGVzlF2UjAtfX2dsb2JhbHNfX5R9lHWGlIZSMC4="}, "_last_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVNQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJbAAQAAAAAAAKxJWr+xpyU/3SAMPzaEcL5lSLQ+OVSjPus4cDz1XAU9Ak/pvVo1tD5rAYS+hPghPjsk/76LC709xQoGPyD3xT0aM7c/VQdnvifMrz6qGhg93yRevUyYVL/2x4u+dgj7vk8EOz/30r4+yED/PjAhkL+5aba/8gAwPwNqCT9ohh+/oCdVvl29fz6kzr88IQ8zPgSo/b515hg/bfAMvz1oCT0vpzS/WKiQPtJQSD9yCtQ+AhlCP8oG5DxUvDM/qVMpvtmCvD0JNY0+3GUXvxrjxj1PBDs/99K+PshA/z4wIZC/0iiGv5uRyz5EnRU/ttNEPkpT6b7bFZQ+Fr2IPmHaUz5T9T8+ArGLPqH0nr5x2Wo9U+qvvvj+V75Mz8w+089AvmKVED+uE+e+S6UoP0jxtD37kmQ+qZqgPK81CL9yo4W+TwQ7P/fSvj7IQP8+MCGQvwNBTb/nPxs/+YUOP0YgpD5SWjU/tmYiP0j7jL6NlG++QMAHPhUHpz7UZ5u+QF+6PrxE3r5Q9fg9JHBYPy5O0r4gYm4/wyEAv41phD7xbPo+rs3avE1TOr8N8fa+01qLPU8EOz/30r4+yED/PjAhkL+UjAVudW1weZSMBWR0eXBllJOUjAJmNJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwRLHIaUjAFDlHSUUpQu"}, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVdwAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYEAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSwSFlIwBQ5R0lFKULg=="}, "_last_original_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVNQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJbAAQAAAAAAAAAAAABArv01AACAPwAAAAAAAAAAAAAAAAAAAAAAAACAcJ5fvQAAAADbAPK/AAAAABEaD74AAAAAQHn6PwAAAACZa9Y8AAAAAALy5z8AAAAAOigQvgAAAAB909m/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAiCqBNgAAgD8AAAAAAAAAAAAAAAAAAAAAAAAAgAiSor0AAAAAJMX3vwAAAADvZMw9AAAAALge9z8AAAAAS9e+PQAAAABdhPk/AAAAADxT2D0AAAAASnfovwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMaATDYAAIA/AAAAAAAAAAAAAAAAAAAAAAAAAIDMNRI+AAAAAL6A5r8AAAAAvYfsPQAAAADr5v0/AAAAAH2hmT0AAAAAwtD5PwAAAAADm5m9AAAAAGmV5b8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADVgrQ0AACAPwAAAAAAAAAAAAAAAAAAAAAAAACA29iLvQAAAAB9pdm/AAAAADYgu7wAAAAApmbqPwAAAAAgHzu8AAAAAGlq9D8AAAAAZSm2PAAAAABw8eK/AAAAAAAAAAAAAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJmNJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwRLHIaUjAFDlHSUUpQu"}, "_episode_num": 0, "use_sde": true, "sde_sample_freq": -1, "_current_progress_remaining": 0.0, "ep_info_buffer": {":type:": "<class 
'collections.deque'>", ":serialized:": "gAWVRAwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQJKD0Fjd56eMAWyUTegDjAF0lEdApLADcZccEXV9lChoBkdAk8eb9l2/z2gHTegDaAhHQKSyZfWtlqd1fZQoaAZHQJOvuL61stVoB03oA2gIR0Cksse+mFajdX2UKGgGR0CJ1LlYEGJOaAdN6ANoCEdApLNl7Y02tXV9lChoBkdAlkn4smOU+2gHTegDaAhHQKS/4o0hvBJ1fZQoaAZHQJcFqYrrgO1oB03oA2gIR0CkwjIDPnjidX2UKGgGR0CUGEtALRa5aAdN6ANoCEdApMKcIVuaW3V9lChoBkdAk3WIBaLXMGgHTegDaAhHQKTDPJwsGxF1fZQoaAZHQJblhzfaYeFoB03oA2gIR0Ckz8HUDuBudX2UKGgGR0CTcMhm5DqoaAdN6ANoCEdApNIu7tiQT3V9lChoBkdAltB3EdeY2WgHTegDaAhHQKTSkgvDgqF1fZQoaAZHQIud3IS13MZoB03oA2gIR0Ck0zJXyRSxdX2UKGgGR0CXV+5jH4oJaAdN6ANoCEdApN9+pOvdM3V9lChoBkdAlf4FtO2y9mgHTegDaAhHQKThyxiXpnp1fZQoaAZHQItHbMcIZ65oB03oA2gIR0Ck4iQ97ngYdX2UKGgGR0CWV/8pkPMCaAdN6ANoCEdApOK+FlCkXXV9lChoBkdAl2XnGOuJUGgHTegDaAhHQKTv26PKdQR1fZQoaAZHQJUHAk6cRUZoB03oA2gIR0Ck8lNHpbD/dX2UKGgGR0CXRa51eSjhaAdN6ANoCEdApPK7A57w8XV9lChoBkdAlBrpxeb/fmgHTegDaAhHQKTzYHbAUL51fZQoaAZHQJTZ+fywwCdoB03oA2gIR0ClALVJUYKqdX2UKGgGR0CUx9Ti83+/aAdN6ANoCEdApQMy7TUiIXV9lChoBkdAli9783uNP2gHTegDaAhHQKUDrUPxx1h1fZQoaAZHQJTBoLYwqRVoB03oA2gIR0ClBEqw6hg3dX2UKGgGR0CQ2jysjmjkaAdN6ANoCEdApREcRUWEb3V9lChoBkdAk+pLVFx4p2gHTegDaAhHQKUTf6VMVUN1fZQoaAZHQJfI84Qz1sdoB03oA2gIR0ClE+tLL6k7dX2UKGgGR0CRTLkE9t/GaAdN6ANoCEdApRSHnKW9lHV9lChoBkdAlMzGucMEzWgHTegDaAhHQKUhlIo3Jgd1fZQoaAZHQJbP5UlzEJloB03oA2gIR0ClI/yteUpvdX2UKGgGR0CWE0RdyDIzaAdN6ANoCEdApSR1jG1hLHV9lChoBkdAi1TmGVRk3GgHTegDaAhHQKUlJDQ7cO91fZQoaAZHQJgTMK3NLUVoB03oA2gIR0ClMeBtDUmVdX2UKGgGR0CWToa7VawEaAdN6ANoCEdApTRLXrdFfHV9lChoBkdAlqTEmplz2mgHTegDaAhHQKU0rdzGPxR1fZQoaAZHQJhKQ61b7j1oB03oA2gIR0ClNUKyfL9udX2UKGgGR0CW1xw7kn1GaAdN6ANoCEdApUIG65Gz8nV9lChoBkdAlqmawQlKLGgHTegDaAhHQKVEVxc3VCp1fZQoaAZHQJKbFFI/Z/VoB03oA2gIR0ClRL9X9zfadX2UKGgGR0CXmQOHWSU1aAdN6ANoCEdApUVYoVmBfHV9lChoBkdAkej/hIe5nWgHTegDaAhHQKVSFeSjgyd1fZQoaAZHQIsHiKm8/UxoB03oA2gIR0ClVHdWIXTFdX2UKGgGR0CXCN++/QBxaAdN6ANoCEdApVTnT/hl2HV9lChoBkdAlSS79VFQVWgHTegDaAhHQKVVh0L+glF1fZQoaAZHQJGik8uBczJoB03oA2gIR0ClYpgFPi1idX2UKGgGR0CTtaQ3xWkraAdN6ANoCEdApWUev4dp7HV9lChoBkdAkuGqK+BYm2gHTegDaAhHQKVlgc6vJRx1fZQoaAZHQJFoCiSJTERoB03oA2gIR0ClZhgLqlgudX2UKGgGR0CUqgNdqtYCaAdN6ANoCEdApXMhAt4A0nV9lChoBkdAfXwOoo/iYWgHTegDaAhHQKV1dzqbBoF1fZQoaAZHQJJPlQsPJ7toB03oA2gIR0CldeE2P1cudX2UKGgGR0CVSRpXp4bCaAdN6ANoCEdApXZ+yX2M9HV9lChoBkdAiww4msvIwWgHTegDaAhHQKWDFkvsZ511fZQoaAZHQJTByFyq+8JoB03oA2gIR0ClhXG2kSEldX2UKGgGR0CVsTqyGBWgaAdN6ANoCEdApYXcVclgMXV9lChoBkdAlcHrLdN34mgHTegDaAhHQKWGeO0b9611fZQoaAZHQJNfDYRNATtoB03oA2gIR0Clkyg75mAcdX2UKGgGR0CWfKVy3kPuaAdN6ANoCEdApZWOrp7kXHV9lChoBkdAjXBUF0PpZGgHTegDaAhHQKWV7Q8fV7R1fZQoaAZHQJOfFRqGlANoB03oA2gIR0Cllo+LehwmdX2UKGgGR0CTH7R4hUzbaAdN6ANoCEdApaMzIaLn93V9lChoBkdAkrpkCV8kU2gHTegDaAhHQKWliyquKXR1fZQoaAZHQJHIgb1h9b5oB03oA2gIR0ClpgA9Net0dX2UKGgGR0CQUBkXUH6eaAdN6ANoCEdApaanBvaURnV9lChoBkdAlVBGs3hn8WgHTegDaAhHQKWz8RBeHBV1fZQoaAZHQJN06UOd5IJoB03oA2gIR0Cltjr2xptadX2UKGgGR0CWMUAMlTm5aAdN6ANoCEdApbahPfsNUnV9lChoBkdAlRy5JoTPB2gHTegDaAhHQKW3TAs052h1fZQoaAZHQJaJUYfnwG5oB03oA2gIR0Clw+Bz3h4udX2UKGgGR0CTOWJf6XSjaAdN6ANoCEdApcY7sOXmeXV9lChoBkdAlbn+nl4keWgHTegDaAhHQKXGmtuDSPV1fZQoaAZHQJQHCgf2bodoB03oA2gIR0ClxzpItlI3dX2UKGgGR0CU8F3nIQvpaAdN6ANoCEdApdPF6C17Y3V9lChoBkdAk5/bHMlkY2gHTegDaAhHQKXWLst03fh1fZQoaAZHQJVKBWbPQfJoB03oA2gIR0Cl1pugQHzIdX2UKGgGR0CTlvtrbg0kaAdN6ANoCEdApdc+Dxsl9nV9lChoBkdAlQsqiCaqj2gHTegDaAhHQKXj298JD3N1fZQoaAZHQJXWiTbFjutoB03oA2gIR0Cl5jUtyxRmdX2UKGgGR0CWXSjiGWUsaAdN6ANoCEdApealKf4AS3V9lChoBkdAlM0Tf3vhImgHTegDaAhHQKXnVfLs8gZ1fZQoaAZHQJXqQS9M9KVoB03oA2gIR0Cl89iYb83udX2UKGgGR0CVCDqrR0EHaAdN6ANoCEdApfYklTm4iHV9lChoBkdAleG1QqI8AGgHTegDaAhHQKX2kOYIBzV1fZQoaAZHQJWjLMmnfl9
oB03oA2gIR0Cl9yxJ/XoUdX2UKGgGR0CWTBoWYWtVaAdN6ANoCEdApgOwZuQ6qHV9lChoBkdAlauz7l7tzGgHTegDaAhHQKYGAkv9LpR1fZQoaAZHQJdDpp35eqtoB03oA2gIR0CmBmWZy+6AdX2UKGgGR0CWZimF8G9paAdN6ANoCEdApgcF36hxpHV9lChoBkdAlnZWkzoECGgHTegDaAhHQKYTcBDohZB1fZQoaAZHQJTU7IKc/dJoB03oA2gIR0CmFb0IcBEKdX2UKGgGR0CTW//Ot4iYaAdN6ANoCEdAphYs6FM7EHV9lChoBkdAlaOURFqi5GgHTegDaAhHQKYW1LdvbXZ1fZQoaAZHQJNyvEfkmyBoB03oA2gIR0CmI4XQdCE6dX2UKGgGR0CRm/hAnlXBaAdN6ANoCEdApiXWC/XXiHV9lChoBkdAk5GQAAAAAGgHTegDaAhHQKYmOjvd/KB1fZQoaAZHQJI3wOBlMAZoB03oA2gIR0CmJs+lsP8RdX2UKGgGR0CSeSG4I8hcaAdN6ANoCEdApjOJceKba3V9lChoBkdAlDNzjFQ2uWgHTegDaAhHQKY10IMSbph1fZQoaAZHQJUM4q6OHWVoB03oA2gIR0CmNjW+GoJidX2UKGgGR0CSSd9VFQVLaAdN6ANoCEdApjbUT37DVHVlLg=="}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 62500, "n_steps": 8, "gamma": 0.99, "gae_lambda": 0.9, "ent_coef": 0.0, "vf_coef": 0.4, "max_grad_norm": 0.5, "normalize_advantage": false, "system_info": {"OS": "Linux-5.10.16.3-microsoft-standard-WSL2-x86_64-with-glibc2.35 # 1 SMP Fri Apr 2 22:23:49 UTC 2021", "Python": "3.10.9", "Stable-Baselines3": "1.8.0a2", "PyTorch": "1.13.1+cu117", "GPU Enabled": "True", "Numpy": "1.23.5", "Gym": "0.21.0"}}
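config.json records the full training configuration: 4 parallel environments, 2M timesteps, gSDE exploration, and an RMSprop-backed policy with `log_std_init=-2` and `ortho_init=False`. A minimal sketch of how these hyperparameters map onto the SB3 `A2C` constructor; the environment setup is an assumption inferred from `n_envs` and the presence of `vec_normalize.pkl`:

```python
import pybullet_envs  # noqa: F401  # registers AntBulletEnv-v0 with Gym
from stable_baselines3 import A2C
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.vec_env import VecNormalize

# 4 parallel environments, as recorded under "n_envs"; observation/reward
# normalization is assumed because the repo ships vec_normalize.pkl.
env = make_vec_env("AntBulletEnv-v0", n_envs=4)
env = VecNormalize(env, norm_obs=True, norm_reward=True)

# Hyperparameters copied from config.json.
model = A2C(
    "MlpPolicy",
    env,
    learning_rate=0.00096,
    n_steps=8,
    gamma=0.99,
    gae_lambda=0.9,
    ent_coef=0.0,
    vf_coef=0.4,
    max_grad_norm=0.5,
    use_sde=True,
    normalize_advantage=False,
    policy_kwargs=dict(log_std_init=-2, ortho_init=False),
    verbose=1,
)

model.learn(total_timesteps=2_000_000)  # "num_timesteps": 2000000
```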
replay.mp4
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:2095df834fda9067fd282acc8a952b4ce8612e7fde328fb2363afa96375f36a3
+size 845611
results.json
CHANGED
@@ -1 +1 @@
-{"mean_reward":
+{"mean_reward": 1338.403076176066, "std_reward": 40.492487322875796, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2023-05-31T13:52:05.853236"}
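results.json is the output of a deterministic 10-episode evaluation. A sketch of how such a file is typically produced with SB3's `evaluate_policy`; exact numbers will differ between runs:

```python
import json
from datetime import datetime

import pybullet_envs  # noqa: F401  # registers AntBulletEnv-v0 with Gym
from stable_baselines3 import A2C
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.vec_env import VecNormalize

# Restore the saved normalization statistics (see vec_normalize.pkl below)
# and load the checkpoint committed above.
eval_env = VecNormalize.load("vec_normalize.pkl", make_vec_env("AntBulletEnv-v0", n_envs=1))
eval_env.training = False
eval_env.norm_reward = False
model = A2C.load("a2c-AntBulletEnv-v0.zip")

# 10 deterministic episodes, matching the committed settings.
mean_reward, std_reward = evaluate_policy(
    model, eval_env, n_eval_episodes=10, deterministic=True
)

with open("results.json", "w") as f:
    json.dump(
        {
            "mean_reward": float(mean_reward),
            "std_reward": float(std_reward),
            "is_deterministic": True,
            "n_eval_episodes": 10,
            "eval_datetime": datetime.now().isoformat(),
        },
        f,
    )
```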
vec_normalize.pkl
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:292836cf4ab09efdd2ead7229967b51f0f04fa202dfe437a56637e368e2b380c
size 2136
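vec_normalize.pkl holds the `VecNormalize` running statistics (observation and reward normalization) from training; the evaluation sketch above restores them with `VecNormalize.load`. Saving them next to the checkpoint is a one-liner, assuming `env` and `model` are the wrapped training env and agent from the config.json sketch:

```python
# Persist the policy and the running normalization statistics together, so
# that evaluation and the replay video see the same observation scaling.
model.save("a2c-AntBulletEnv-v0")   # -> a2c-AntBulletEnv-v0.zip
env.save("vec_normalize.pkl")       # VecNormalize statistics (~2 KB pickle)
```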