Commit 2df4191 by fortminors
Parent(s): 957a71f

Upload folder using huggingface_hub
Changed files:
- .gitattributes +1 -0
- .summary/0/events.out.tfevents.1724273840.96f39a858e07 +3 -0
- README.md +56 -0
- checkpoint_p0/best_000000650_2662400_reward_5.059.pth +3 -0
- checkpoint_p0/checkpoint_000000935_3829760.pth +3 -0
- checkpoint_p0/checkpoint_000000978_4005888.pth +3 -0
- config.json +142 -0
- replay.mp4 +3 -0
- sf_log.txt +969 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+replay.mp4 filter=lfs diff=lfs merge=lfs -text
.summary/0/events.out.tfevents.1724273840.96f39a858e07
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d682fc374fb81a4547b21dfd2c17495f6bd8f61ca2620686d50ec355769ed21
+size 928065
README.md
ADDED
@@ -0,0 +1,56 @@
+---
+library_name: sample-factory
+tags:
+- deep-reinforcement-learning
+- reinforcement-learning
+- sample-factory
+model-index:
+- name: APPO
+  results:
+  - task:
+      type: reinforcement-learning
+      name: reinforcement-learning
+    dataset:
+      name: doom_health_gathering_supreme
+      type: doom_health_gathering_supreme
+    metrics:
+    - type: mean_reward
+      value: 4.06 +/- 0.70
+      name: mean_reward
+      verified: false
+---
+
+An **APPO** model trained on the **doom_health_gathering_supreme** environment.
+
+This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
+Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/
+
+## Downloading the model
+
+After installing Sample-Factory, download the model with:
+```
+python -m sample_factory.huggingface.load_from_hub -r fortminors/rl_course_vizdoom_health_gathering_supreme
+```
+
+## Using the model
+
+To run the model after download, use the `enjoy` script corresponding to this environment:
+```
+python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
+```
+
+You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
+See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.
+
+## Training with this model
+
+To continue training with this model, use the `train` script corresponding to this environment:
+```
+python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
+```
+
+Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume from the step count at which it previously stopped.
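As an alternative to the Sample-Factory CLI shown in the README above, the files in this commit can also be fetched directly with `huggingface_hub`, the library named in the commit message. A minimal sketch, assuming `huggingface_hub` is installed; the target directory layout is my assumption about where a Sample-Factory `train_dir` would normally hold the downloaded experiment:

```python
# Sketch: download this repository with huggingface_hub instead of the
# sample_factory.huggingface.load_from_hub CLI shown in the README above.
from huggingface_hub import snapshot_download

local_path = snapshot_download(
    repo_id="fortminors/rl_course_vizdoom_health_gathering_supreme",
    local_dir="./train_dir/rl_course_vizdoom_health_gathering_supreme",  # assumed layout
)
print("Downloaded to:", local_path)
```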
checkpoint_p0/best_000000650_2662400_reward_5.059.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6be9e19fe312d555dbafeddc9b2c74d8440d34048013087adda754bbd5bb4b8
+size 34929051
checkpoint_p0/checkpoint_000000935_3829760.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d32e472fd9c7ba4eff33900bf5ad3d3ba072ad8a4af2b50794da98d76a743559
+size 34929477
checkpoint_p0/checkpoint_000000978_4005888.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff28ae3749dc198168ae3c123f46565fce2a02012e9986019054d84c17f00e6f
+size 34929477
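The three `.pth` files above are Git LFS pointers to PyTorch checkpoints: the best-reward checkpoint plus the two most recent ones, consistent with `keep_checkpoints: 2` and the `save_best_*` settings in the config.json below. A minimal sketch for peeking inside one once the LFS content has been pulled; it assumes a recent PyTorch, and the key layout is Sample-Factory-internal, so the code only lists whatever top-level entries happen to be present:

```python
# Sketch: list the top-level entries of a Sample-Factory checkpoint.
# Assumes the LFS file has been pulled (e.g. `git lfs pull`) and torch is installed.
import torch

ckpt_path = "checkpoint_p0/best_000000650_2662400_reward_5.059.pth"

# weights_only=False because the file stores training metadata, not just tensors;
# only do this for checkpoints you trust (newer PyTorch defaults to weights_only=True).
checkpoint = torch.load(ckpt_path, map_location="cpu", weights_only=False)

for key, value in checkpoint.items():
    print(f"{key}: {type(value).__name__}")
```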
config.json
ADDED
@@ -0,0 +1,142 @@
+{
+    "help": false,
+    "algo": "APPO",
+    "env": "doom_health_gathering_supreme",
+    "experiment": "default_experiment",
+    "train_dir": "/content/train_dir",
+    "restart_behavior": "resume",
+    "device": "gpu",
+    "seed": null,
+    "num_policies": 1,
+    "async_rl": true,
+    "serial_mode": false,
+    "batched_sampling": false,
+    "num_batches_to_accumulate": 2,
+    "worker_num_splits": 2,
+    "policy_workers_per_policy": 1,
+    "max_policy_lag": 1000,
+    "num_workers": 8,
+    "num_envs_per_worker": 4,
+    "batch_size": 1024,
+    "num_batches_per_epoch": 1,
+    "num_epochs": 1,
+    "rollout": 32,
+    "recurrence": 32,
+    "shuffle_minibatches": false,
+    "gamma": 0.99,
+    "reward_scale": 1.0,
+    "reward_clip": 1000.0,
+    "value_bootstrap": false,
+    "normalize_returns": true,
+    "exploration_loss_coeff": 0.001,
+    "value_loss_coeff": 0.5,
+    "kl_loss_coeff": 0.0,
+    "exploration_loss": "symmetric_kl",
+    "gae_lambda": 0.95,
+    "ppo_clip_ratio": 0.1,
+    "ppo_clip_value": 0.2,
+    "with_vtrace": false,
+    "vtrace_rho": 1.0,
+    "vtrace_c": 1.0,
+    "optimizer": "adam",
+    "adam_eps": 1e-06,
+    "adam_beta1": 0.9,
+    "adam_beta2": 0.999,
+    "max_grad_norm": 4.0,
+    "learning_rate": 0.0001,
+    "lr_schedule": "constant",
+    "lr_schedule_kl_threshold": 0.008,
+    "lr_adaptive_min": 1e-06,
+    "lr_adaptive_max": 0.01,
+    "obs_subtract_mean": 0.0,
+    "obs_scale": 255.0,
+    "normalize_input": true,
+    "normalize_input_keys": null,
+    "decorrelate_experience_max_seconds": 0,
+    "decorrelate_envs_on_one_worker": true,
+    "actor_worker_gpus": [],
+    "set_workers_cpu_affinity": true,
+    "force_envs_single_thread": false,
+    "default_niceness": 0,
+    "log_to_file": true,
+    "experiment_summaries_interval": 10,
+    "flush_summaries_interval": 30,
+    "stats_avg": 100,
+    "summaries_use_frameskip": true,
+    "heartbeat_interval": 20,
+    "heartbeat_reporting_interval": 600,
+    "train_for_env_steps": 4000000,
+    "train_for_seconds": 10000000000,
+    "save_every_sec": 120,
+    "keep_checkpoints": 2,
+    "load_checkpoint_kind": "latest",
+    "save_milestones_sec": -1,
+    "save_best_every_sec": 5,
+    "save_best_metric": "reward",
+    "save_best_after": 100000,
+    "benchmark": false,
+    "encoder_mlp_layers": [
+        512,
+        512
+    ],
+    "encoder_conv_architecture": "convnet_simple",
+    "encoder_conv_mlp_layers": [
+        512
+    ],
+    "use_rnn": true,
+    "rnn_size": 512,
+    "rnn_type": "gru",
+    "rnn_num_layers": 1,
+    "decoder_mlp_layers": [],
+    "nonlinearity": "elu",
+    "policy_initialization": "orthogonal",
+    "policy_init_gain": 1.0,
+    "actor_critic_share_weights": true,
+    "adaptive_stddev": true,
+    "continuous_tanh_scale": 0.0,
+    "initial_stddev": 1.0,
+    "use_env_info_cache": false,
+    "env_gpu_actions": false,
+    "env_gpu_observations": true,
+    "env_frameskip": 4,
+    "env_framestack": 1,
+    "pixel_format": "CHW",
+    "use_record_episode_statistics": false,
+    "with_wandb": false,
+    "wandb_user": null,
+    "wandb_project": "sample_factory",
+    "wandb_group": null,
+    "wandb_job_type": "SF",
+    "wandb_tags": [],
+    "with_pbt": false,
+    "pbt_mix_policies_in_one_env": true,
+    "pbt_period_env_steps": 5000000,
+    "pbt_start_mutation": 20000000,
+    "pbt_replace_fraction": 0.3,
+    "pbt_mutation_rate": 0.15,
+    "pbt_replace_reward_gap": 0.1,
+    "pbt_replace_reward_gap_absolute": 1e-06,
+    "pbt_optimize_gamma": false,
+    "pbt_target_objective": "true_objective",
+    "pbt_perturb_min": 1.1,
+    "pbt_perturb_max": 1.5,
+    "num_agents": -1,
+    "num_humans": 0,
+    "num_bots": -1,
+    "start_bot_difficulty": null,
+    "timelimit": null,
+    "res_w": 128,
+    "res_h": 72,
+    "wide_aspect_ratio": false,
+    "eval_env_frameskip": 1,
+    "fps": 35,
+    "command_line": "--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000",
+    "cli_args": {
+        "env": "doom_health_gathering_supreme",
+        "num_workers": 8,
+        "num_envs_per_worker": 4,
+        "train_for_env_steps": 4000000
+    },
+    "git_hash": "unknown",
+    "git_repo_name": "not a git repository"
+}
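The config above records both how the run was launched (`command_line` / `cli_args`) and every resolved hyperparameter. A minimal sketch, standard library only, for loading it and cross-checking how the per-iteration sample count relates to `batch_size`; the batch-size relationship is my reading of these Sample-Factory settings, not something stated in the file:

```python
# Sketch: load the training configuration and report a few key APPO settings.
import json

with open("config.json") as f:
    cfg = json.load(f)

print("algo:", cfg["algo"], "| env:", cfg["env"])
print("learning_rate:", cfg["learning_rate"], "| gamma:", cfg["gamma"])

# Assumed relationship: each sampling round collects roughly
# num_workers * num_envs_per_worker * rollout transitions,
# which here equals batch_size (8 * 4 * 32 = 1024).
per_round = cfg["num_workers"] * cfg["num_envs_per_worker"] * cfg["rollout"]
print("samples per sampling round:", per_round, "| batch_size:", cfg["batch_size"])
```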
replay.mp4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e389ac0ec10c9c52440058129cef51a2b4705d2655912b710921b5172dd44690
+size 5177758
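The last file in the commit, `sf_log.txt` below, is the Sample-Factory console log; it reports throughput (FPS) and the running average episode reward roughly every five seconds. A minimal sketch, assuming the log keeps the exact line format shown below, for pulling the reward series out with the standard library:

```python
# Sketch: extract the average-episode-reward series from sf_log.txt.
# Matches lines such as:
#   [2024-08-21 20:57:55,796][00286] Avg episode reward: [(0, '2.106')]
import re

pattern = re.compile(
    r"\[(?P<ts>[\d\- :,]+)\]\[\d+\] Avg episode reward: \[\(0, '(?P<reward>[\d.]+)'\)\]"
)

rewards = []
with open("sf_log.txt") as log:
    for line in log:
        match = pattern.search(line)
        if match:
            rewards.append((match.group("ts"), float(match.group("reward"))))

print(f"parsed {len(rewards)} reward entries")
if rewards:
    print("last entry:", rewards[-1])
```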
sf_log.txt
ADDED
@@ -0,0 +1,969 @@
+[2024-08-21 20:57:25,004][00286] Saving configuration to /content/train_dir/default_experiment/config.json...
+[2024-08-21 20:57:25,006][00286] Rollout worker 0 uses device cpu
+[2024-08-21 20:57:25,008][00286] Rollout worker 1 uses device cpu
+[2024-08-21 20:57:25,009][00286] Rollout worker 2 uses device cpu
+[2024-08-21 20:57:25,010][00286] Rollout worker 3 uses device cpu
+[2024-08-21 20:57:25,011][00286] Rollout worker 4 uses device cpu
+[2024-08-21 20:57:25,012][00286] Rollout worker 5 uses device cpu
+[2024-08-21 20:57:25,013][00286] Rollout worker 6 uses device cpu
+[2024-08-21 20:57:25,014][00286] Rollout worker 7 uses device cpu
+[2024-08-21 20:57:25,183][00286] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-08-21 20:57:25,185][00286] InferenceWorker_p0-w0: min num requests: 2
+[2024-08-21 20:57:25,219][00286] Starting all processes...
+[2024-08-21 20:57:25,220][00286] Starting process learner_proc0
+[2024-08-21 20:57:26,610][00286] Starting all processes...
+[2024-08-21 20:57:26,622][00286] Starting process inference_proc0-0
+[2024-08-21 20:57:26,622][00286] Starting process rollout_proc0
+[2024-08-21 20:57:26,623][00286] Starting process rollout_proc1
+[2024-08-21 20:57:26,623][00286] Starting process rollout_proc2
+[2024-08-21 20:57:26,623][00286] Starting process rollout_proc3
+[2024-08-21 20:57:26,623][00286] Starting process rollout_proc4
+[2024-08-21 20:57:26,623][00286] Starting process rollout_proc5
+[2024-08-21 20:57:26,623][00286] Starting process rollout_proc6
+[2024-08-21 20:57:26,623][00286] Starting process rollout_proc7
+[2024-08-21 20:57:41,211][03216] Worker 2 uses CPU cores [0]
+[2024-08-21 20:57:41,324][03197] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-08-21 20:57:41,327][03197] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+[2024-08-21 20:57:41,414][03197] Num visible devices: 1
+[2024-08-21 20:57:41,424][03217] Worker 1 uses CPU cores [1]
+[2024-08-21 20:57:41,441][03197] Starting seed is not provided
+[2024-08-21 20:57:41,442][03197] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-08-21 20:57:41,443][03197] Initializing actor-critic model on device cuda:0
+[2024-08-21 20:57:41,444][03197] RunningMeanStd input shape: (3, 72, 128)
+[2024-08-21 20:57:41,447][03197] RunningMeanStd input shape: (1,)
+[2024-08-21 20:57:41,472][03221] Worker 7 uses CPU cores [1]
+[2024-08-21 20:57:41,482][03197] ConvEncoder: input_channels=3
+[2024-08-21 20:57:41,524][03219] Worker 5 uses CPU cores [1]
+[2024-08-21 20:57:41,544][03218] Worker 3 uses CPU cores [1]
+[2024-08-21 20:57:41,551][03214] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-08-21 20:57:41,552][03214] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+[2024-08-21 20:57:41,604][03214] Num visible devices: 1
+[2024-08-21 20:57:41,655][03222] Worker 6 uses CPU cores [0]
+[2024-08-21 20:57:41,696][03215] Worker 0 uses CPU cores [0]
+[2024-08-21 20:57:41,720][03220] Worker 4 uses CPU cores [0]
+[2024-08-21 20:57:41,815][03197] Conv encoder output size: 512
+[2024-08-21 20:57:41,815][03197] Policy head output size: 512
+[2024-08-21 20:57:41,876][03197] Created Actor Critic model with architecture:
+[2024-08-21 20:57:41,876][03197] ActorCriticSharedWeights(
+  (obs_normalizer): ObservationNormalizer(
+    (running_mean_std): RunningMeanStdDictInPlace(
+      (running_mean_std): ModuleDict(
+        (obs): RunningMeanStdInPlace()
+      )
+    )
+  )
+  (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+  (encoder): VizdoomEncoder(
+    (basic_encoder): ConvEncoder(
+      (enc): RecursiveScriptModule(
+        original_name=ConvEncoderImpl
+        (conv_head): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Conv2d)
+          (1): RecursiveScriptModule(original_name=ELU)
+          (2): RecursiveScriptModule(original_name=Conv2d)
+          (3): RecursiveScriptModule(original_name=ELU)
+          (4): RecursiveScriptModule(original_name=Conv2d)
+          (5): RecursiveScriptModule(original_name=ELU)
+        )
+        (mlp_layers): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Linear)
+          (1): RecursiveScriptModule(original_name=ELU)
+        )
+      )
+    )
+  )
+  (core): ModelCoreRNN(
+    (core): GRU(512, 512)
+  )
+  (decoder): MlpDecoder(
+    (mlp): Identity()
+  )
+  (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+  (action_parameterization): ActionParameterizationDefault(
+    (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+  )
+)
+[2024-08-21 20:57:42,146][03197] Using optimizer <class 'torch.optim.adam.Adam'>
+[2024-08-21 20:57:42,917][03197] No checkpoints found
+[2024-08-21 20:57:42,917][03197] Did not load from checkpoint, starting from scratch!
+[2024-08-21 20:57:42,917][03197] Initialized policy 0 weights for model version 0
+[2024-08-21 20:57:42,921][03197] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-08-21 20:57:42,946][03197] LearnerWorker_p0 finished initialization!
94 |
+
[2024-08-21 20:57:43,067][03214] RunningMeanStd input shape: (3, 72, 128)
|
95 |
+
[2024-08-21 20:57:43,068][03214] RunningMeanStd input shape: (1,)
|
96 |
+
[2024-08-21 20:57:43,081][03214] ConvEncoder: input_channels=3
|
97 |
+
[2024-08-21 20:57:43,188][03214] Conv encoder output size: 512
|
98 |
+
[2024-08-21 20:57:43,189][03214] Policy head output size: 512
|
99 |
+
[2024-08-21 20:57:43,273][00286] Inference worker 0-0 is ready!
|
100 |
+
[2024-08-21 20:57:43,275][00286] All inference workers are ready! Signal rollout workers to start!
|
101 |
+
[2024-08-21 20:57:43,718][03221] Doom resolution: 160x120, resize resolution: (128, 72)
|
102 |
+
[2024-08-21 20:57:43,738][03215] Doom resolution: 160x120, resize resolution: (128, 72)
|
103 |
+
[2024-08-21 20:57:43,763][03216] Doom resolution: 160x120, resize resolution: (128, 72)
|
104 |
+
[2024-08-21 20:57:43,780][03220] Doom resolution: 160x120, resize resolution: (128, 72)
|
105 |
+
[2024-08-21 20:57:43,781][03222] Doom resolution: 160x120, resize resolution: (128, 72)
|
106 |
+
[2024-08-21 20:57:43,834][03217] Doom resolution: 160x120, resize resolution: (128, 72)
|
107 |
+
[2024-08-21 20:57:43,842][03219] Doom resolution: 160x120, resize resolution: (128, 72)
|
108 |
+
[2024-08-21 20:57:43,866][03218] Doom resolution: 160x120, resize resolution: (128, 72)
|
109 |
+
[2024-08-21 20:57:45,175][00286] Heartbeat connected on Batcher_0
|
110 |
+
[2024-08-21 20:57:45,181][00286] Heartbeat connected on LearnerWorker_p0
|
111 |
+
[2024-08-21 20:57:45,210][00286] Heartbeat connected on InferenceWorker_p0-w0
|
112 |
+
[2024-08-21 20:57:45,793][00286] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 0. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
|
113 |
+
[2024-08-21 20:57:46,263][03216] Decorrelating experience for 0 frames...
|
114 |
+
[2024-08-21 20:57:46,264][03219] Decorrelating experience for 0 frames...
|
115 |
+
[2024-08-21 20:57:46,269][03220] Decorrelating experience for 0 frames...
|
116 |
+
[2024-08-21 20:57:46,262][03217] Decorrelating experience for 0 frames...
|
117 |
+
[2024-08-21 20:57:46,265][03221] Decorrelating experience for 0 frames...
|
118 |
+
[2024-08-21 20:57:46,271][03215] Decorrelating experience for 0 frames...
|
119 |
+
[2024-08-21 20:57:46,273][03222] Decorrelating experience for 0 frames...
|
120 |
+
[2024-08-21 20:57:46,985][03216] Decorrelating experience for 32 frames...
|
121 |
+
[2024-08-21 20:57:47,586][03218] Decorrelating experience for 0 frames...
|
122 |
+
[2024-08-21 20:57:47,603][03217] Decorrelating experience for 32 frames...
|
123 |
+
[2024-08-21 20:57:47,605][03219] Decorrelating experience for 32 frames...
|
124 |
+
[2024-08-21 20:57:48,445][03215] Decorrelating experience for 32 frames...
|
125 |
+
[2024-08-21 20:57:49,399][03218] Decorrelating experience for 32 frames...
|
126 |
+
[2024-08-21 20:57:49,404][03220] Decorrelating experience for 32 frames...
|
127 |
+
[2024-08-21 20:57:49,416][03221] Decorrelating experience for 32 frames...
|
128 |
+
[2024-08-21 20:57:49,723][03215] Decorrelating experience for 64 frames...
|
129 |
+
[2024-08-21 20:57:49,908][03217] Decorrelating experience for 64 frames...
|
130 |
+
[2024-08-21 20:57:49,931][03219] Decorrelating experience for 64 frames...
|
131 |
+
[2024-08-21 20:57:50,701][03216] Decorrelating experience for 64 frames...
|
132 |
+
[2024-08-21 20:57:50,793][00286] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
|
133 |
+
[2024-08-21 20:57:50,810][03218] Decorrelating experience for 64 frames...
|
134 |
+
[2024-08-21 20:57:50,854][03219] Decorrelating experience for 96 frames...
|
135 |
+
[2024-08-21 20:57:50,997][00286] Heartbeat connected on RolloutWorker_w5
|
136 |
+
[2024-08-21 20:57:51,223][03220] Decorrelating experience for 64 frames...
|
137 |
+
[2024-08-21 20:57:51,318][03215] Decorrelating experience for 96 frames...
|
138 |
+
[2024-08-21 20:57:51,654][03218] Decorrelating experience for 96 frames...
|
139 |
+
[2024-08-21 20:57:51,670][00286] Heartbeat connected on RolloutWorker_w0
|
140 |
+
[2024-08-21 20:57:51,870][00286] Heartbeat connected on RolloutWorker_w3
|
141 |
+
[2024-08-21 20:57:52,402][03217] Decorrelating experience for 96 frames...
|
142 |
+
[2024-08-21 20:57:52,571][03221] Decorrelating experience for 64 frames...
|
143 |
+
[2024-08-21 20:57:52,615][00286] Heartbeat connected on RolloutWorker_w1
|
144 |
+
[2024-08-21 20:57:53,060][03222] Decorrelating experience for 32 frames...
|
145 |
+
[2024-08-21 20:57:53,188][03216] Decorrelating experience for 96 frames...
|
146 |
+
[2024-08-21 20:57:53,430][00286] Heartbeat connected on RolloutWorker_w2
|
147 |
+
[2024-08-21 20:57:53,761][03220] Decorrelating experience for 96 frames...
|
148 |
+
[2024-08-21 20:57:53,984][00286] Heartbeat connected on RolloutWorker_w4
|
149 |
+
[2024-08-21 20:57:55,793][00286] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 179.2. Samples: 1792. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
|
150 |
+
[2024-08-21 20:57:55,796][00286] Avg episode reward: [(0, '2.106')]
|
151 |
+
[2024-08-21 20:57:56,012][03221] Decorrelating experience for 96 frames...
|
152 |
+
[2024-08-21 20:57:56,428][03197] Signal inference workers to stop experience collection...
|
153 |
+
[2024-08-21 20:57:56,456][03214] InferenceWorker_p0-w0: stopping experience collection
|
154 |
+
[2024-08-21 20:57:56,544][00286] Heartbeat connected on RolloutWorker_w7
|
155 |
+
[2024-08-21 20:57:56,740][03222] Decorrelating experience for 64 frames...
|
156 |
+
[2024-08-21 20:57:57,844][03222] Decorrelating experience for 96 frames...
|
157 |
+
[2024-08-21 20:57:57,955][00286] Heartbeat connected on RolloutWorker_w6
|
158 |
+
[2024-08-21 20:57:59,437][03197] Signal inference workers to resume experience collection...
|
159 |
+
[2024-08-21 20:57:59,438][03214] InferenceWorker_p0-w0: resuming experience collection
|
160 |
+
[2024-08-21 20:58:00,796][00286] Fps is (10 sec: 409.5, 60 sec: 273.0, 300 sec: 273.0). Total num frames: 4096. Throughput: 0: 167.2. Samples: 2508. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
|
161 |
+
[2024-08-21 20:58:00,799][00286] Avg episode reward: [(0, '2.837')]
|
162 |
+
[2024-08-21 20:58:05,793][00286] Fps is (10 sec: 2048.0, 60 sec: 1024.0, 300 sec: 1024.0). Total num frames: 20480. Throughput: 0: 256.6. Samples: 5132. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
163 |
+
[2024-08-21 20:58:05,799][00286] Avg episode reward: [(0, '3.556')]
|
164 |
+
[2024-08-21 20:58:09,745][03214] Updated weights for policy 0, policy_version 10 (0.0036)
|
165 |
+
[2024-08-21 20:58:10,793][00286] Fps is (10 sec: 4097.1, 60 sec: 1802.2, 300 sec: 1802.2). Total num frames: 45056. Throughput: 0: 449.6. Samples: 11240. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
166 |
+
[2024-08-21 20:58:10,798][00286] Avg episode reward: [(0, '4.197')]
|
167 |
+
[2024-08-21 20:58:15,793][00286] Fps is (10 sec: 4505.6, 60 sec: 2184.5, 300 sec: 2184.5). Total num frames: 65536. Throughput: 0: 491.9. Samples: 14756. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
168 |
+
[2024-08-21 20:58:15,799][00286] Avg episode reward: [(0, '4.332')]
|
169 |
+
[2024-08-21 20:58:20,793][00286] Fps is (10 sec: 3276.6, 60 sec: 2223.5, 300 sec: 2223.5). Total num frames: 77824. Throughput: 0: 568.5. Samples: 19898. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
170 |
+
[2024-08-21 20:58:20,795][00286] Avg episode reward: [(0, '4.287')]
|
171 |
+
[2024-08-21 20:58:20,928][03214] Updated weights for policy 0, policy_version 20 (0.0036)
|
172 |
+
[2024-08-21 20:58:25,793][00286] Fps is (10 sec: 3276.8, 60 sec: 2457.6, 300 sec: 2457.6). Total num frames: 98304. Throughput: 0: 630.8. Samples: 25230. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
173 |
+
[2024-08-21 20:58:25,800][00286] Avg episode reward: [(0, '4.298')]
|
174 |
+
[2024-08-21 20:58:30,793][00286] Fps is (10 sec: 4096.2, 60 sec: 2639.6, 300 sec: 2639.6). Total num frames: 118784. Throughput: 0: 636.0. Samples: 28618. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
175 |
+
[2024-08-21 20:58:30,799][00286] Avg episode reward: [(0, '4.459')]
|
176 |
+
[2024-08-21 20:58:30,881][03197] Saving new best policy, reward=4.459!
|
177 |
+
[2024-08-21 20:58:30,897][03214] Updated weights for policy 0, policy_version 30 (0.0030)
|
178 |
+
[2024-08-21 20:58:35,794][00286] Fps is (10 sec: 4095.7, 60 sec: 2785.2, 300 sec: 2785.2). Total num frames: 139264. Throughput: 0: 765.4. Samples: 34442. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
179 |
+
[2024-08-21 20:58:35,797][00286] Avg episode reward: [(0, '4.542')]
|
180 |
+
[2024-08-21 20:58:35,806][03197] Saving new best policy, reward=4.542!
|
181 |
+
[2024-08-21 20:58:40,793][00286] Fps is (10 sec: 3276.8, 60 sec: 2755.5, 300 sec: 2755.5). Total num frames: 151552. Throughput: 0: 810.8. Samples: 38280. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
182 |
+
[2024-08-21 20:58:40,798][00286] Avg episode reward: [(0, '4.383')]
|
183 |
+
[2024-08-21 20:58:43,427][03214] Updated weights for policy 0, policy_version 40 (0.0027)
|
184 |
+
[2024-08-21 20:58:45,793][00286] Fps is (10 sec: 3277.0, 60 sec: 2867.2, 300 sec: 2867.2). Total num frames: 172032. Throughput: 0: 866.3. Samples: 41490. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
185 |
+
[2024-08-21 20:58:45,796][00286] Avg episode reward: [(0, '4.310')]
|
186 |
+
[2024-08-21 20:58:50,796][00286] Fps is (10 sec: 4094.9, 60 sec: 3208.4, 300 sec: 2961.6). Total num frames: 192512. Throughput: 0: 950.7. Samples: 47918. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
187 |
+
[2024-08-21 20:58:50,804][00286] Avg episode reward: [(0, '4.352')]
|
188 |
+
[2024-08-21 20:58:54,474][03214] Updated weights for policy 0, policy_version 50 (0.0040)
|
189 |
+
[2024-08-21 20:58:55,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 2925.7). Total num frames: 204800. Throughput: 0: 909.7. Samples: 52178. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
190 |
+
[2024-08-21 20:58:55,797][00286] Avg episode reward: [(0, '4.468')]
|
191 |
+
[2024-08-21 20:59:00,793][00286] Fps is (10 sec: 3277.6, 60 sec: 3686.6, 300 sec: 3003.7). Total num frames: 225280. Throughput: 0: 884.5. Samples: 54560. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
192 |
+
[2024-08-21 20:59:00,796][00286] Avg episode reward: [(0, '4.469')]
|
193 |
+
[2024-08-21 20:59:05,167][03214] Updated weights for policy 0, policy_version 60 (0.0025)
|
194 |
+
[2024-08-21 20:59:05,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3072.0). Total num frames: 245760. Throughput: 0: 915.9. Samples: 61114. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
195 |
+
[2024-08-21 20:59:05,797][00286] Avg episode reward: [(0, '4.547')]
|
196 |
+
[2024-08-21 20:59:05,799][03197] Saving new best policy, reward=4.547!
|
197 |
+
[2024-08-21 20:59:10,795][00286] Fps is (10 sec: 3685.9, 60 sec: 3618.0, 300 sec: 3084.0). Total num frames: 262144. Throughput: 0: 914.6. Samples: 66390. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
198 |
+
[2024-08-21 20:59:10,797][00286] Avg episode reward: [(0, '4.503')]
|
199 |
+
[2024-08-21 20:59:15,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3094.8). Total num frames: 278528. Throughput: 0: 882.1. Samples: 68314. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
200 |
+
[2024-08-21 20:59:15,798][00286] Avg episode reward: [(0, '4.441')]
|
201 |
+
[2024-08-21 20:59:17,384][03214] Updated weights for policy 0, policy_version 70 (0.0025)
|
202 |
+
[2024-08-21 20:59:20,793][00286] Fps is (10 sec: 3687.0, 60 sec: 3686.4, 300 sec: 3147.4). Total num frames: 299008. Throughput: 0: 885.7. Samples: 74298. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
203 |
+
[2024-08-21 20:59:20,798][00286] Avg episode reward: [(0, '4.530')]
|
204 |
+
[2024-08-21 20:59:20,809][03197] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000073_299008.pth...
|
205 |
+
[2024-08-21 20:59:25,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3194.9). Total num frames: 319488. Throughput: 0: 936.8. Samples: 80438. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
206 |
+
[2024-08-21 20:59:25,798][00286] Avg episode reward: [(0, '4.484')]
|
207 |
+
[2024-08-21 20:59:28,396][03214] Updated weights for policy 0, policy_version 80 (0.0020)
|
208 |
+
[2024-08-21 20:59:30,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3159.8). Total num frames: 331776. Throughput: 0: 909.6. Samples: 82420. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
209 |
+
[2024-08-21 20:59:30,799][00286] Avg episode reward: [(0, '4.350')]
|
210 |
+
[2024-08-21 20:59:35,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3202.3). Total num frames: 352256. Throughput: 0: 881.0. Samples: 87562. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
211 |
+
[2024-08-21 20:59:35,796][00286] Avg episode reward: [(0, '4.396')]
|
212 |
+
[2024-08-21 20:59:38,680][03214] Updated weights for policy 0, policy_version 90 (0.0023)
|
213 |
+
[2024-08-21 20:59:40,793][00286] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3276.8). Total num frames: 376832. Throughput: 0: 939.4. Samples: 94452. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
214 |
+
[2024-08-21 20:59:40,796][00286] Avg episode reward: [(0, '4.391')]
|
215 |
+
[2024-08-21 20:59:45,796][00286] Fps is (10 sec: 4094.9, 60 sec: 3686.2, 300 sec: 3276.7). Total num frames: 393216. Throughput: 0: 953.1. Samples: 97452. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
|
216 |
+
[2024-08-21 20:59:45,799][00286] Avg episode reward: [(0, '4.404')]
|
217 |
+
[2024-08-21 20:59:50,263][03214] Updated weights for policy 0, policy_version 100 (0.0018)
|
218 |
+
[2024-08-21 20:59:50,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3618.3, 300 sec: 3276.8). Total num frames: 409600. Throughput: 0: 903.4. Samples: 101768. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
219 |
+
[2024-08-21 20:59:50,795][00286] Avg episode reward: [(0, '4.451')]
|
220 |
+
[2024-08-21 20:59:55,793][00286] Fps is (10 sec: 4097.1, 60 sec: 3822.9, 300 sec: 3339.8). Total num frames: 434176. Throughput: 0: 942.0. Samples: 108778. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
|
221 |
+
[2024-08-21 20:59:55,800][00286] Avg episode reward: [(0, '4.512')]
|
222 |
+
[2024-08-21 20:59:59,184][03214] Updated weights for policy 0, policy_version 110 (0.0016)
|
223 |
+
[2024-08-21 21:00:00,793][00286] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3367.8). Total num frames: 454656. Throughput: 0: 977.5. Samples: 112300. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
224 |
+
[2024-08-21 21:00:00,799][00286] Avg episode reward: [(0, '4.587')]
|
225 |
+
[2024-08-21 21:00:00,810][03197] Saving new best policy, reward=4.587!
|
226 |
+
[2024-08-21 21:00:05,795][00286] Fps is (10 sec: 3276.2, 60 sec: 3686.3, 300 sec: 3335.3). Total num frames: 466944. Throughput: 0: 945.5. Samples: 116846. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
227 |
+
[2024-08-21 21:00:05,798][00286] Avg episode reward: [(0, '4.472')]
|
228 |
+
[2024-08-21 21:00:10,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3754.8, 300 sec: 3361.5). Total num frames: 487424. Throughput: 0: 939.0. Samples: 122694. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
229 |
+
[2024-08-21 21:00:10,797][00286] Avg episode reward: [(0, '4.339')]
|
230 |
+
[2024-08-21 21:00:11,029][03214] Updated weights for policy 0, policy_version 120 (0.0056)
|
231 |
+
[2024-08-21 21:00:15,793][00286] Fps is (10 sec: 4506.4, 60 sec: 3891.2, 300 sec: 3413.3). Total num frames: 512000. Throughput: 0: 973.3. Samples: 126220. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
232 |
+
[2024-08-21 21:00:15,795][00286] Avg episode reward: [(0, '4.502')]
|
233 |
+
[2024-08-21 21:00:20,794][00286] Fps is (10 sec: 4095.7, 60 sec: 3822.9, 300 sec: 3408.9). Total num frames: 528384. Throughput: 0: 990.5. Samples: 132134. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
234 |
+
[2024-08-21 21:00:20,797][00286] Avg episode reward: [(0, '4.582')]
|
235 |
+
[2024-08-21 21:00:21,339][03214] Updated weights for policy 0, policy_version 130 (0.0036)
|
236 |
+
[2024-08-21 21:00:25,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3404.8). Total num frames: 544768. Throughput: 0: 943.1. Samples: 136892. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
237 |
+
[2024-08-21 21:00:25,800][00286] Avg episode reward: [(0, '4.536')]
|
238 |
+
[2024-08-21 21:00:30,793][00286] Fps is (10 sec: 4096.3, 60 sec: 3959.5, 300 sec: 3450.6). Total num frames: 569344. Throughput: 0: 952.9. Samples: 140330. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
239 |
+
[2024-08-21 21:00:30,799][00286] Avg episode reward: [(0, '4.460')]
|
240 |
+
[2024-08-21 21:00:31,571][03214] Updated weights for policy 0, policy_version 140 (0.0030)
|
241 |
+
[2024-08-21 21:00:35,793][00286] Fps is (10 sec: 4505.6, 60 sec: 3959.5, 300 sec: 3469.6). Total num frames: 589824. Throughput: 0: 1007.5. Samples: 147104. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
242 |
+
[2024-08-21 21:00:35,796][00286] Avg episode reward: [(0, '4.414')]
|
243 |
+
[2024-08-21 21:00:40,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3440.6). Total num frames: 602112. Throughput: 0: 944.8. Samples: 151294. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
244 |
+
[2024-08-21 21:00:40,795][00286] Avg episode reward: [(0, '4.398')]
|
245 |
+
[2024-08-21 21:00:43,307][03214] Updated weights for policy 0, policy_version 150 (0.0031)
|
246 |
+
[2024-08-21 21:00:45,793][00286] Fps is (10 sec: 3276.7, 60 sec: 3823.1, 300 sec: 3458.8). Total num frames: 622592. Throughput: 0: 930.9. Samples: 154190. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
247 |
+
[2024-08-21 21:00:45,799][00286] Avg episode reward: [(0, '4.218')]
|
248 |
+
[2024-08-21 21:00:50,793][00286] Fps is (10 sec: 4505.6, 60 sec: 3959.5, 300 sec: 3498.2). Total num frames: 647168. Throughput: 0: 981.4. Samples: 161008. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
249 |
+
[2024-08-21 21:00:50,796][00286] Avg episode reward: [(0, '4.429')]
|
250 |
+
[2024-08-21 21:00:53,020][03214] Updated weights for policy 0, policy_version 160 (0.0018)
|
251 |
+
[2024-08-21 21:00:55,793][00286] Fps is (10 sec: 3686.5, 60 sec: 3754.7, 300 sec: 3470.8). Total num frames: 659456. Throughput: 0: 964.9. Samples: 166116. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
252 |
+
[2024-08-21 21:00:55,794][00286] Avg episode reward: [(0, '4.558')]
|
253 |
+
[2024-08-21 21:01:00,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3486.9). Total num frames: 679936. Throughput: 0: 935.2. Samples: 168302. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
254 |
+
[2024-08-21 21:01:00,800][00286] Avg episode reward: [(0, '4.511')]
|
255 |
+
[2024-08-21 21:01:04,107][03214] Updated weights for policy 0, policy_version 170 (0.0044)
|
256 |
+
[2024-08-21 21:01:05,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3891.3, 300 sec: 3502.1). Total num frames: 700416. Throughput: 0: 949.6. Samples: 174864. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
257 |
+
[2024-08-21 21:01:05,795][00286] Avg episode reward: [(0, '4.499')]
|
258 |
+
[2024-08-21 21:01:10,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3891.2, 300 sec: 3516.6). Total num frames: 720896. Throughput: 0: 972.9. Samples: 180672. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
259 |
+
[2024-08-21 21:01:10,795][00286] Avg episode reward: [(0, '4.446')]
|
260 |
+
[2024-08-21 21:01:15,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3491.4). Total num frames: 733184. Throughput: 0: 938.0. Samples: 182540. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
261 |
+
[2024-08-21 21:01:15,798][00286] Avg episode reward: [(0, '4.304')]
|
262 |
+
[2024-08-21 21:01:16,309][03214] Updated weights for policy 0, policy_version 180 (0.0019)
|
263 |
+
[2024-08-21 21:01:20,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3505.4). Total num frames: 753664. Throughput: 0: 909.0. Samples: 188008. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
264 |
+
[2024-08-21 21:01:20,797][00286] Avg episode reward: [(0, '4.570')]
|
265 |
+
[2024-08-21 21:01:20,807][03197] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000184_753664.pth...
|
266 |
+
[2024-08-21 21:01:25,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3518.8). Total num frames: 774144. Throughput: 0: 959.8. Samples: 194484. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
267 |
+
[2024-08-21 21:01:25,798][00286] Avg episode reward: [(0, '4.722')]
|
268 |
+
[2024-08-21 21:01:25,868][03197] Saving new best policy, reward=4.722!
|
269 |
+
[2024-08-21 21:01:25,875][03214] Updated weights for policy 0, policy_version 190 (0.0035)
|
270 |
+
[2024-08-21 21:01:30,797][00286] Fps is (10 sec: 3685.0, 60 sec: 3686.2, 300 sec: 3513.4). Total num frames: 790528. Throughput: 0: 943.8. Samples: 196664. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
271 |
+
[2024-08-21 21:01:30,799][00286] Avg episode reward: [(0, '4.729')]
|
272 |
+
[2024-08-21 21:01:30,815][03197] Saving new best policy, reward=4.729!
|
273 |
+
[2024-08-21 21:01:35,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3508.3). Total num frames: 806912. Throughput: 0: 891.7. Samples: 201134. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
274 |
+
[2024-08-21 21:01:35,795][00286] Avg episode reward: [(0, '4.548')]
|
275 |
+
[2024-08-21 21:01:38,390][03214] Updated weights for policy 0, policy_version 200 (0.0025)
|
276 |
+
[2024-08-21 21:01:40,793][00286] Fps is (10 sec: 3687.8, 60 sec: 3754.7, 300 sec: 3520.8). Total num frames: 827392. Throughput: 0: 920.4. Samples: 207534. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
277 |
+
[2024-08-21 21:01:40,800][00286] Avg episode reward: [(0, '4.602')]
|
278 |
+
[2024-08-21 21:01:45,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3515.7). Total num frames: 843776. Throughput: 0: 940.9. Samples: 210644. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
279 |
+
[2024-08-21 21:01:45,795][00286] Avg episode reward: [(0, '4.556')]
|
280 |
+
[2024-08-21 21:01:50,793][00286] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3494.1). Total num frames: 856064. Throughput: 0: 878.3. Samples: 214388. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
281 |
+
[2024-08-21 21:01:50,800][00286] Avg episode reward: [(0, '4.568')]
|
282 |
+
[2024-08-21 21:01:50,913][03214] Updated weights for policy 0, policy_version 210 (0.0023)
|
283 |
+
[2024-08-21 21:01:55,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3522.6). Total num frames: 880640. Throughput: 0: 879.6. Samples: 220254. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
284 |
+
[2024-08-21 21:01:55,795][00286] Avg episode reward: [(0, '4.447')]
|
285 |
+
[2024-08-21 21:02:00,421][03214] Updated weights for policy 0, policy_version 220 (0.0029)
|
286 |
+
[2024-08-21 21:02:00,793][00286] Fps is (10 sec: 4505.6, 60 sec: 3686.4, 300 sec: 3533.8). Total num frames: 901120. Throughput: 0: 911.2. Samples: 223544. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
287 |
+
[2024-08-21 21:02:00,795][00286] Avg episode reward: [(0, '4.346')]
|
288 |
+
[2024-08-21 21:02:05,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3513.1). Total num frames: 913408. Throughput: 0: 895.7. Samples: 228316. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
289 |
+
[2024-08-21 21:02:05,797][00286] Avg episode reward: [(0, '4.632')]
|
290 |
+
[2024-08-21 21:02:10,793][00286] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3508.6). Total num frames: 929792. Throughput: 0: 858.3. Samples: 233106. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
291 |
+
[2024-08-21 21:02:10,800][00286] Avg episode reward: [(0, '4.572')]
|
292 |
+
[2024-08-21 21:02:12,996][03214] Updated weights for policy 0, policy_version 230 (0.0029)
|
293 |
+
[2024-08-21 21:02:15,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3519.5). Total num frames: 950272. Throughput: 0: 881.1. Samples: 236310. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
294 |
+
[2024-08-21 21:02:15,798][00286] Avg episode reward: [(0, '4.622')]
|
295 |
+
[2024-08-21 21:02:20,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3530.0). Total num frames: 970752. Throughput: 0: 917.6. Samples: 242426. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
296 |
+
[2024-08-21 21:02:20,795][00286] Avg episode reward: [(0, '4.617')]
|
297 |
+
[2024-08-21 21:02:25,039][03214] Updated weights for policy 0, policy_version 240 (0.0034)
|
298 |
+
[2024-08-21 21:02:25,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3510.9). Total num frames: 983040. Throughput: 0: 867.7. Samples: 246580. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
299 |
+
[2024-08-21 21:02:25,795][00286] Avg episode reward: [(0, '4.463')]
|
300 |
+
[2024-08-21 21:02:30,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3550.1, 300 sec: 3521.1). Total num frames: 1003520. Throughput: 0: 867.5. Samples: 249682. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
301 |
+
[2024-08-21 21:02:30,799][00286] Avg episode reward: [(0, '4.453')]
|
302 |
+
[2024-08-21 21:02:35,068][03214] Updated weights for policy 0, policy_version 250 (0.0022)
|
303 |
+
[2024-08-21 21:02:35,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3531.0). Total num frames: 1024000. Throughput: 0: 920.2. Samples: 255798. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
304 |
+
[2024-08-21 21:02:35,795][00286] Avg episode reward: [(0, '4.687')]
|
305 |
+
[2024-08-21 21:02:40,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3512.8). Total num frames: 1036288. Throughput: 0: 880.8. Samples: 259890. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
306 |
+
[2024-08-21 21:02:40,800][00286] Avg episode reward: [(0, '4.699')]
|
307 |
+
[2024-08-21 21:02:45,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 1056768. Throughput: 0: 859.2. Samples: 262208. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
308 |
+
[2024-08-21 21:02:45,800][00286] Avg episode reward: [(0, '4.676')]
|
309 |
+
[2024-08-21 21:02:47,857][03214] Updated weights for policy 0, policy_version 260 (0.0025)
|
310 |
+
[2024-08-21 21:02:50,793][00286] Fps is (10 sec: 4096.1, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 1077248. Throughput: 0: 892.8. Samples: 268492. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
311 |
+
[2024-08-21 21:02:50,795][00286] Avg episode reward: [(0, '4.592')]
|
312 |
+
[2024-08-21 21:02:55,793][00286] Fps is (10 sec: 3276.7, 60 sec: 3481.6, 300 sec: 3679.5). Total num frames: 1089536. Throughput: 0: 899.2. Samples: 273570. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
313 |
+
[2024-08-21 21:02:55,796][00286] Avg episode reward: [(0, '4.587')]
|
314 |
+
[2024-08-21 21:03:00,259][03214] Updated weights for policy 0, policy_version 270 (0.0021)
|
315 |
+
[2024-08-21 21:03:00,793][00286] Fps is (10 sec: 2867.1, 60 sec: 3413.3, 300 sec: 3679.5). Total num frames: 1105920. Throughput: 0: 871.0. Samples: 275504. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
316 |
+
[2024-08-21 21:03:00,801][00286] Avg episode reward: [(0, '4.493')]
|
317 |
+
[2024-08-21 21:03:05,793][00286] Fps is (10 sec: 3686.5, 60 sec: 3549.9, 300 sec: 3665.6). Total num frames: 1126400. Throughput: 0: 865.6. Samples: 281376. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
318 |
+
[2024-08-21 21:03:05,795][00286] Avg episode reward: [(0, '4.453')]
|
319 |
+
[2024-08-21 21:03:09,410][03214] Updated weights for policy 0, policy_version 280 (0.0022)
|
320 |
+
[2024-08-21 21:03:10,793][00286] Fps is (10 sec: 4505.6, 60 sec: 3686.4, 300 sec: 3679.5). Total num frames: 1150976. Throughput: 0: 920.2. Samples: 287988. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
321 |
+
[2024-08-21 21:03:10,796][00286] Avg episode reward: [(0, '4.547')]
|
322 |
+
[2024-08-21 21:03:15,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3679.5). Total num frames: 1163264. Throughput: 0: 899.2. Samples: 290144. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
323 |
+
[2024-08-21 21:03:15,801][00286] Avg episode reward: [(0, '4.799')]
|
324 |
+
[2024-08-21 21:03:15,802][03197] Saving new best policy, reward=4.799!
|
325 |
+
[2024-08-21 21:03:20,793][00286] Fps is (10 sec: 3276.9, 60 sec: 3549.9, 300 sec: 3679.5). Total num frames: 1183744. Throughput: 0: 879.8. Samples: 295390. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
326 |
+
[2024-08-21 21:03:20,797][00286] Avg episode reward: [(0, '4.723')]
|
327 |
+
[2024-08-21 21:03:20,811][03197] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000289_1183744.pth...
|
328 |
+
[2024-08-21 21:03:20,931][03197] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000073_299008.pth
|
329 |
+
[2024-08-21 21:03:21,149][03214] Updated weights for policy 0, policy_version 290 (0.0014)
|
330 |
+
[2024-08-21 21:03:25,793][00286] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3693.3). Total num frames: 1208320. Throughput: 0: 944.0. Samples: 302372. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
331 |
+
[2024-08-21 21:03:25,801][00286] Avg episode reward: [(0, '4.635')]
|
332 |
+
[2024-08-21 21:03:30,794][00286] Fps is (10 sec: 4095.6, 60 sec: 3686.3, 300 sec: 3679.5). Total num frames: 1224704. Throughput: 0: 955.4. Samples: 305202. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
333 |
+
[2024-08-21 21:03:30,796][00286] Avg episode reward: [(0, '4.446')]
|
334 |
+
[2024-08-21 21:03:32,106][03214] Updated weights for policy 0, policy_version 300 (0.0017)
|
335 |
+
[2024-08-21 21:03:35,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3693.3). Total num frames: 1241088. Throughput: 0: 908.4. Samples: 309370. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
336 |
+
[2024-08-21 21:03:35,800][00286] Avg episode reward: [(0, '4.832')]
|
337 |
+
[2024-08-21 21:03:35,803][03197] Saving new best policy, reward=4.832!
|
338 |
+
[2024-08-21 21:03:40,793][00286] Fps is (10 sec: 3686.6, 60 sec: 3754.7, 300 sec: 3693.3). Total num frames: 1261568. Throughput: 0: 941.0. Samples: 315916. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
339 |
+
[2024-08-21 21:03:40,796][00286] Avg episode reward: [(0, '4.931')]
|
340 |
+
[2024-08-21 21:03:40,808][03197] Saving new best policy, reward=4.931!
|
341 |
+
[2024-08-21 21:03:42,325][03214] Updated weights for policy 0, policy_version 310 (0.0023)
|
342 |
+
[2024-08-21 21:03:45,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3693.4). Total num frames: 1282048. Throughput: 0: 968.6. Samples: 319092. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
343 |
+
[2024-08-21 21:03:45,799][00286] Avg episode reward: [(0, '4.872')]
|
344 |
+
[2024-08-21 21:03:50,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3693.3). Total num frames: 1294336. Throughput: 0: 942.5. Samples: 323788. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
345 |
+
[2024-08-21 21:03:50,800][00286] Avg episode reward: [(0, '4.853')]
|
346 |
+
[2024-08-21 21:03:54,229][03214] Updated weights for policy 0, policy_version 320 (0.0034)
|
347 |
+
[2024-08-21 21:03:55,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3693.3). Total num frames: 1314816. Throughput: 0: 922.8. Samples: 329512. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
348 |
+
[2024-08-21 21:03:55,796][00286] Avg episode reward: [(0, '4.775')]
|
349 |
+
[2024-08-21 21:04:00,793][00286] Fps is (10 sec: 4096.1, 60 sec: 3822.9, 300 sec: 3693.3). Total num frames: 1335296. Throughput: 0: 947.5. Samples: 332780. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
350 |
+
[2024-08-21 21:04:00,803][00286] Avg episode reward: [(0, '4.609')]
|
351 |
+
[2024-08-21 21:04:04,545][03214] Updated weights for policy 0, policy_version 330 (0.0031)
|
352 |
+
[2024-08-21 21:04:05,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3693.4). Total num frames: 1351680. Throughput: 0: 953.0. Samples: 338274. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
353 |
+
[2024-08-21 21:04:05,799][00286] Avg episode reward: [(0, '4.749')]
|
354 |
+
[2024-08-21 21:04:10,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3618.2, 300 sec: 3693.3). Total num frames: 1368064. Throughput: 0: 893.6. Samples: 342586. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
355 |
+
[2024-08-21 21:04:10,802][00286] Avg episode reward: [(0, '4.805')]
|
356 |
+
[2024-08-21 21:04:15,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3693.3). Total num frames: 1388544. Throughput: 0: 902.9. Samples: 345830. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
357 |
+
[2024-08-21 21:04:15,795][00286] Avg episode reward: [(0, '4.783')]
|
358 |
+
[2024-08-21 21:04:15,834][03214] Updated weights for policy 0, policy_version 340 (0.0031)
|
359 |
+
[2024-08-21 21:04:20,798][00286] Fps is (10 sec: 4094.1, 60 sec: 3754.4, 300 sec: 3693.3). Total num frames: 1409024. Throughput: 0: 959.1. Samples: 352534. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
360 |
+
[2024-08-21 21:04:20,800][00286] Avg episode reward: [(0, '4.514')]
|
361 |
+
[2024-08-21 21:04:25,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3707.2). Total num frames: 1425408. Throughput: 0: 904.0. Samples: 356596. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
362 |
+
[2024-08-21 21:04:25,799][00286] Avg episode reward: [(0, '4.449')]
|
363 |
+
[2024-08-21 21:04:28,138][03214] Updated weights for policy 0, policy_version 350 (0.0025)
|
364 |
+
[2024-08-21 21:04:30,793][00286] Fps is (10 sec: 3278.3, 60 sec: 3618.2, 300 sec: 3693.3). Total num frames: 1441792. Throughput: 0: 893.2. Samples: 359284. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
365 |
+
[2024-08-21 21:04:30,795][00286] Avg episode reward: [(0, '4.323')]
|
366 |
+
[2024-08-21 21:04:35,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3693.3). Total num frames: 1466368. Throughput: 0: 928.4. Samples: 365564. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
367 |
+
[2024-08-21 21:04:35,795][00286] Avg episode reward: [(0, '4.564')]
|
368 |
+
[2024-08-21 21:04:38,708][03214] Updated weights for policy 0, policy_version 360 (0.0024)
|
369 |
+
[2024-08-21 21:04:40,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3679.5). Total num frames: 1478656. Throughput: 0: 906.0. Samples: 370282. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
370 |
+
[2024-08-21 21:04:40,800][00286] Avg episode reward: [(0, '4.856')]
|
371 |
+
[2024-08-21 21:04:45,793][00286] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3679.5). Total num frames: 1495040. Throughput: 0: 876.6. Samples: 372228. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
372 |
+
[2024-08-21 21:04:45,799][00286] Avg episode reward: [(0, '4.814')]
|
373 |
+
[2024-08-21 21:04:50,358][03214] Updated weights for policy 0, policy_version 370 (0.0021)
|
374 |
+
[2024-08-21 21:04:50,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3665.6). Total num frames: 1515520. Throughput: 0: 890.9. Samples: 378364. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
|
375 |
+
[2024-08-21 21:04:50,795][00286] Avg episode reward: [(0, '4.520')]
|
376 |
+
[2024-08-21 21:04:55,795][00286] Fps is (10 sec: 4095.3, 60 sec: 3686.3, 300 sec: 3665.6). Total num frames: 1536000. Throughput: 0: 923.7. Samples: 384152. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
377 |
+
[2024-08-21 21:04:55,797][00286] Avg episode reward: [(0, '4.371')]
|
378 |
+
[2024-08-21 21:05:00,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3665.6). Total num frames: 1548288. Throughput: 0: 893.5. Samples: 386036. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
379 |
+
[2024-08-21 21:05:00,795][00286] Avg episode reward: [(0, '4.381')]
|
380 |
+
[2024-08-21 21:05:02,521][03214] Updated weights for policy 0, policy_version 380 (0.0032)
|
381 |
+
[2024-08-21 21:05:05,793][00286] Fps is (10 sec: 3277.3, 60 sec: 3618.1, 300 sec: 3665.6). Total num frames: 1568768. Throughput: 0: 863.4. Samples: 391382. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
382 |
+
[2024-08-21 21:05:05,800][00286] Avg episode reward: [(0, '4.561')]
|
383 |
+
[2024-08-21 21:05:10,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 1589248. Throughput: 0: 916.5. Samples: 397838. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
384 |
+
[2024-08-21 21:05:10,796][00286] Avg episode reward: [(0, '4.514')]
|
385 |
+
[2024-08-21 21:05:13,088][03214] Updated weights for policy 0, policy_version 390 (0.0041)
|
386 |
+
[2024-08-21 21:05:15,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3637.8). Total num frames: 1601536. Throughput: 0: 904.1. Samples: 399970. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
387 |
+
[2024-08-21 21:05:15,796][00286] Avg episode reward: [(0, '4.478')]
|
388 |
+
[2024-08-21 21:05:20,796][00286] Fps is (10 sec: 3275.9, 60 sec: 3550.0, 300 sec: 3651.7). Total num frames: 1622016. Throughput: 0: 865.4. Samples: 404508. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
389 |
+
[2024-08-21 21:05:20,800][00286] Avg episode reward: [(0, '4.586')]
|
390 |
+
[2024-08-21 21:05:20,816][03197] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000396_1622016.pth...
|
391 |
+
[2024-08-21 21:05:20,967][03197] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000184_753664.pth
|
392 |
+
[2024-08-21 21:05:24,394][03214] Updated weights for policy 0, policy_version 400 (0.0027)
|
393 |
+
[2024-08-21 21:05:25,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3637.8). Total num frames: 1642496. Throughput: 0: 904.5. Samples: 410984. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
394 |
+
[2024-08-21 21:05:25,798][00286] Avg episode reward: [(0, '4.758')]
|
395 |
+
[2024-08-21 21:05:30,793][00286] Fps is (10 sec: 3687.4, 60 sec: 3618.1, 300 sec: 3623.9). Total num frames: 1658880. Throughput: 0: 931.6. Samples: 414152. Policy #0 lag: (min: 0.0, avg: 0.3, max: 2.0)
|
396 |
+
[2024-08-21 21:05:30,799][00286] Avg episode reward: [(0, '4.653')]
|
397 |
+
[2024-08-21 21:05:35,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3637.8). Total num frames: 1675264. Throughput: 0: 885.8. Samples: 418226. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
398 |
+
[2024-08-21 21:05:35,797][00286] Avg episode reward: [(0, '4.840')]
|
399 |
+
[2024-08-21 21:05:36,643][03214] Updated weights for policy 0, policy_version 410 (0.0043)
|
400 |
+
[2024-08-21 21:05:40,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3637.8). Total num frames: 1695744. Throughput: 0: 892.8. Samples: 424328. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
401 |
+
[2024-08-21 21:05:40,795][00286] Avg episode reward: [(0, '4.892')]
|
402 |
+
[2024-08-21 21:05:45,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 1716224. Throughput: 0: 925.8. Samples: 427696. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
403 |
+
[2024-08-21 21:05:45,795][00286] Avg episode reward: [(0, '4.766')]
|
404 |
+
[2024-08-21 21:05:46,052][03214] Updated weights for policy 0, policy_version 420 (0.0033)
|
405 |
+
[2024-08-21 21:05:50,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3637.8). Total num frames: 1732608. Throughput: 0: 920.2. Samples: 432792. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
406 |
+
[2024-08-21 21:05:50,803][00286] Avg episode reward: [(0, '4.631')]
|
407 |
+
[2024-08-21 21:05:55,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3550.0, 300 sec: 3623.9). Total num frames: 1748992. Throughput: 0: 892.0. Samples: 437978. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
408 |
+
[2024-08-21 21:05:55,799][00286] Avg episode reward: [(0, '4.812')]
|
409 |
+
[2024-08-21 21:05:57,807][03214] Updated weights for policy 0, policy_version 430 (0.0028)
|
410 |
+
[2024-08-21 21:06:00,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3637.8). Total num frames: 1773568. Throughput: 0: 919.3. Samples: 441338. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
411 |
+
[2024-08-21 21:06:00,800][00286] Avg episode reward: [(0, '4.980')]
|
412 |
+
[2024-08-21 21:06:00,810][03197] Saving new best policy, reward=4.980!
|
413 |
+
[2024-08-21 21:06:05,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 1789952. Throughput: 0: 952.7. Samples: 447378. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
414 |
+
[2024-08-21 21:06:05,795][00286] Avg episode reward: [(0, '4.824')]
|
415 |
+
[2024-08-21 21:06:09,737][03214] Updated weights for policy 0, policy_version 440 (0.0044)
|
416 |
+
[2024-08-21 21:06:10,793][00286] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3623.9). Total num frames: 1802240. Throughput: 0: 901.2. Samples: 451540. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
417 |
+
[2024-08-21 21:06:10,800][00286] Avg episode reward: [(0, '4.985')]
|
418 |
+
[2024-08-21 21:06:10,911][03197] Saving new best policy, reward=4.985!
|
419 |
+
[2024-08-21 21:06:15,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3637.8). Total num frames: 1826816. Throughput: 0: 903.8. Samples: 454824. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
420 |
+
[2024-08-21 21:06:15,795][00286] Avg episode reward: [(0, '4.873')]
|
421 |
+
[2024-08-21 21:06:19,123][03214] Updated weights for policy 0, policy_version 450 (0.0015)
|
422 |
+
[2024-08-21 21:06:20,793][00286] Fps is (10 sec: 4505.7, 60 sec: 3754.8, 300 sec: 3637.8). Total num frames: 1847296. Throughput: 0: 960.4. Samples: 461444. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
423 |
+
[2024-08-21 21:06:20,799][00286] Avg episode reward: [(0, '4.753')]
|
424 |
+
[2024-08-21 21:06:25,794][00286] Fps is (10 sec: 3276.5, 60 sec: 3618.1, 300 sec: 3624.0). Total num frames: 1859584. Throughput: 0: 922.5. Samples: 465842. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
425 |
+
[2024-08-21 21:06:25,796][00286] Avg episode reward: [(0, '4.769')]
|
426 |
+
[2024-08-21 21:06:30,793][00286] Fps is (10 sec: 3276.7, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 1880064. Throughput: 0: 904.4. Samples: 468396. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
427 |
+
[2024-08-21 21:06:30,799][00286] Avg episode reward: [(0, '4.800')]
|
428 |
+
[2024-08-21 21:06:31,119][03214] Updated weights for policy 0, policy_version 460 (0.0027)
|
429 |
+
[2024-08-21 21:06:35,793][00286] Fps is (10 sec: 4506.1, 60 sec: 3822.9, 300 sec: 3651.7). Total num frames: 1904640. Throughput: 0: 943.2. Samples: 475238. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
430 |
+
[2024-08-21 21:06:35,794][00286] Avg episode reward: [(0, '4.735')]
|
431 |
+
[2024-08-21 21:06:40,797][00286] Fps is (10 sec: 4094.5, 60 sec: 3754.4, 300 sec: 3651.6). Total num frames: 1921024. Throughput: 0: 947.7. Samples: 480628. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
432 |
+
[2024-08-21 21:06:40,799][00286] Avg episode reward: [(0, '4.923')]
|
433 |
+
[2024-08-21 21:06:42,095][03214] Updated weights for policy 0, policy_version 470 (0.0023)
|
434 |
+
[2024-08-21 21:06:45,794][00286] Fps is (10 sec: 3276.5, 60 sec: 3686.4, 300 sec: 3665.6). Total num frames: 1937408. Throughput: 0: 918.4. Samples: 482668. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
435 |
+
[2024-08-21 21:06:45,798][00286] Avg episode reward: [(0, '4.811')]
|
436 |
+
[2024-08-21 21:06:50,793][00286] Fps is (10 sec: 3687.8, 60 sec: 3754.7, 300 sec: 3651.7). Total num frames: 1957888. Throughput: 0: 924.8. Samples: 488994. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
437 |
+
[2024-08-21 21:06:50,795][00286] Avg episode reward: [(0, '4.706')]
|
438 |
+
[2024-08-21 21:06:52,030][03214] Updated weights for policy 0, policy_version 480 (0.0037)
|
439 |
+
[2024-08-21 21:06:55,793][00286] Fps is (10 sec: 4096.3, 60 sec: 3822.9, 300 sec: 3651.7). Total num frames: 1978368. Throughput: 0: 976.4. Samples: 495476. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
440 |
+
[2024-08-21 21:06:55,799][00286] Avg episode reward: [(0, '4.792')]
|
441 |
+
[2024-08-21 21:07:00,794][00286] Fps is (10 sec: 3276.5, 60 sec: 3618.1, 300 sec: 3651.7). Total num frames: 1990656. Throughput: 0: 946.1. Samples: 497400. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
442 |
+
[2024-08-21 21:07:00,801][00286] Avg episode reward: [(0, '4.774')]
|
443 |
+
[2024-08-21 21:07:03,933][03214] Updated weights for policy 0, policy_version 490 (0.0030)
|
444 |
+
[2024-08-21 21:07:05,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3665.6). Total num frames: 2011136. Throughput: 0: 918.1. Samples: 502758. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
445 |
+
[2024-08-21 21:07:05,795][00286] Avg episode reward: [(0, '4.818')]
|
446 |
+
[2024-08-21 21:07:10,793][00286] Fps is (10 sec: 4505.9, 60 sec: 3891.2, 300 sec: 3679.5). Total num frames: 2035712. Throughput: 0: 966.8. Samples: 509346. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
447 |
+
[2024-08-21 21:07:10,798][00286] Avg episode reward: [(0, '4.651')]
|
448 |
+
[2024-08-21 21:07:14,204][03214] Updated weights for policy 0, policy_version 500 (0.0036)
|
449 |
+
[2024-08-21 21:07:15,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3665.6). Total num frames: 2052096. Throughput: 0: 967.8. Samples: 511948. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
450 |
+
[2024-08-21 21:07:15,801][00286] Avg episode reward: [(0, '4.586')]
|
451 |
+
[2024-08-21 21:07:20,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3679.5). Total num frames: 2068480. Throughput: 0: 910.4. Samples: 516208. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
452 |
+
[2024-08-21 21:07:20,795][00286] Avg episode reward: [(0, '4.539')]
|
453 |
+
[2024-08-21 21:07:20,804][03197] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000505_2068480.pth...
|
454 |
+
[2024-08-21 21:07:20,956][03197] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000289_1183744.pth
|
455 |
+
[2024-08-21 21:07:25,146][03214] Updated weights for policy 0, policy_version 510 (0.0053)
|
456 |
+
[2024-08-21 21:07:25,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3823.0, 300 sec: 3679.5). Total num frames: 2088960. Throughput: 0: 938.2. Samples: 522842. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
457 |
+
[2024-08-21 21:07:25,797][00286] Avg episode reward: [(0, '4.581')]
|
458 |
+
[2024-08-21 21:07:30,796][00286] Fps is (10 sec: 4094.9, 60 sec: 3822.8, 300 sec: 3679.4). Total num frames: 2109440. Throughput: 0: 970.0. Samples: 526318. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
459 |
+
[2024-08-21 21:07:30,803][00286] Avg episode reward: [(0, '4.780')]
|
460 |
+
[2024-08-21 21:07:35,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3679.5). Total num frames: 2121728. Throughput: 0: 927.5. Samples: 530732. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
461 |
+
[2024-08-21 21:07:35,804][00286] Avg episode reward: [(0, '4.717')]
|
462 |
+
[2024-08-21 21:07:37,403][03214] Updated weights for policy 0, policy_version 520 (0.0020)
|
463 |
+
[2024-08-21 21:07:40,793][00286] Fps is (10 sec: 3277.7, 60 sec: 3686.6, 300 sec: 3679.5). Total num frames: 2142208. Throughput: 0: 909.9. Samples: 536420. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
464 |
+
[2024-08-21 21:07:40,798][00286] Avg episode reward: [(0, '4.620')]
|
465 |
+
[2024-08-21 21:07:45,793][00286] Fps is (10 sec: 4505.5, 60 sec: 3823.0, 300 sec: 3693.3). Total num frames: 2166784. Throughput: 0: 944.9. Samples: 539922. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
466 |
+
[2024-08-21 21:07:45,796][00286] Avg episode reward: [(0, '4.498')]
|
467 |
+
[2024-08-21 21:07:46,300][03214] Updated weights for policy 0, policy_version 530 (0.0035)
|
468 |
+
[2024-08-21 21:07:50,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3707.2). Total num frames: 2183168. Throughput: 0: 950.0. Samples: 545510. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
469 |
+
[2024-08-21 21:07:50,801][00286] Avg episode reward: [(0, '4.733')]
|
470 |
+
[2024-08-21 21:07:55,793][00286] Fps is (10 sec: 3276.9, 60 sec: 3686.4, 300 sec: 3707.2). Total num frames: 2199552. Throughput: 0: 913.9. Samples: 550470. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
471 |
+
[2024-08-21 21:07:55,799][00286] Avg episode reward: [(0, '4.575')]
|
472 |
+
[2024-08-21 21:07:57,949][03214] Updated weights for policy 0, policy_version 540 (0.0016)
|
473 |
+
[2024-08-21 21:08:00,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3891.3, 300 sec: 3721.1). Total num frames: 2224128. Throughput: 0: 933.4. Samples: 553950. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
474 |
+
[2024-08-21 21:08:00,798][00286] Avg episode reward: [(0, '4.449')]
|
475 |
+
[2024-08-21 21:08:05,794][00286] Fps is (10 sec: 4095.7, 60 sec: 3822.9, 300 sec: 3693.3). Total num frames: 2240512. Throughput: 0: 978.5. Samples: 560242. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
476 |
+
[2024-08-21 21:08:05,796][00286] Avg episode reward: [(0, '4.618')]
|
477 |
+
[2024-08-21 21:08:09,677][03214] Updated weights for policy 0, policy_version 550 (0.0031)
|
478 |
+
[2024-08-21 21:08:10,795][00286] Fps is (10 sec: 2866.5, 60 sec: 3618.0, 300 sec: 3693.3). Total num frames: 2252800. Throughput: 0: 921.9. Samples: 564330. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
479 |
+
[2024-08-21 21:08:10,798][00286] Avg episode reward: [(0, '4.726')]
|
480 |
+
[2024-08-21 21:08:15,793][00286] Fps is (10 sec: 3686.6, 60 sec: 3754.7, 300 sec: 3707.2). Total num frames: 2277376. Throughput: 0: 909.5. Samples: 567242. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
481 |
+
[2024-08-21 21:08:15,799][00286] Avg episode reward: [(0, '4.685')]
|
482 |
+
[2024-08-21 21:08:19,591][03214] Updated weights for policy 0, policy_version 560 (0.0032)
|
483 |
+
[2024-08-21 21:08:20,793][00286] Fps is (10 sec: 4506.7, 60 sec: 3822.9, 300 sec: 3693.3). Total num frames: 2297856. Throughput: 0: 957.2. Samples: 573808. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
484 |
+
[2024-08-21 21:08:20,795][00286] Avg episode reward: [(0, '4.646')]
|
485 |
+
[2024-08-21 21:08:25,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3679.5). Total num frames: 2310144. Throughput: 0: 934.6. Samples: 578478. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
486 |
+
[2024-08-21 21:08:25,799][00286] Avg episode reward: [(0, '4.658')]
|
487 |
+
[2024-08-21 21:08:30,793][00286] Fps is (10 sec: 2867.2, 60 sec: 3618.3, 300 sec: 3679.5). Total num frames: 2326528. Throughput: 0: 901.6. Samples: 580496. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
488 |
+
[2024-08-21 21:08:30,796][00286] Avg episode reward: [(0, '4.651')]
|
489 |
+
[2024-08-21 21:08:31,724][03214] Updated weights for policy 0, policy_version 570 (0.0021)
|
490 |
+
[2024-08-21 21:08:35,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3693.3). Total num frames: 2351104. Throughput: 0: 922.7. Samples: 587032. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
491 |
+
[2024-08-21 21:08:35,795][00286] Avg episode reward: [(0, '4.745')]
|
492 |
+
[2024-08-21 21:08:40,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3679.5). Total num frames: 2367488. Throughput: 0: 940.2. Samples: 592778. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
493 |
+
[2024-08-21 21:08:40,796][00286] Avg episode reward: [(0, '4.706')]
|
494 |
+
[2024-08-21 21:08:42,850][03214] Updated weights for policy 0, policy_version 580 (0.0027)
|
495 |
+
[2024-08-21 21:08:45,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3693.3). Total num frames: 2383872. Throughput: 0: 907.0. Samples: 594766. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
496 |
+
[2024-08-21 21:08:45,803][00286] Avg episode reward: [(0, '4.489')]
|
497 |
+
[2024-08-21 21:08:50,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3693.3). Total num frames: 2404352. Throughput: 0: 893.7. Samples: 600458. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
498 |
+
[2024-08-21 21:08:50,795][00286] Avg episode reward: [(0, '4.449')]
|
499 |
+
[2024-08-21 21:08:53,162][03214] Updated weights for policy 0, policy_version 590 (0.0029)
|
500 |
+
[2024-08-21 21:08:55,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3693.3). Total num frames: 2424832. Throughput: 0: 952.2. Samples: 607176. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
501 |
+
[2024-08-21 21:08:55,795][00286] Avg episode reward: [(0, '4.431')]
|
502 |
+
[2024-08-21 21:09:00,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3693.3). Total num frames: 2441216. Throughput: 0: 939.9. Samples: 609536. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
503 |
+
[2024-08-21 21:09:00,800][00286] Avg episode reward: [(0, '4.480')]
|
504 |
+
[2024-08-21 21:09:04,975][03214] Updated weights for policy 0, policy_version 600 (0.0029)
|
505 |
+
[2024-08-21 21:09:05,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3618.2, 300 sec: 3693.3). Total num frames: 2457600. Throughput: 0: 897.4. Samples: 614192. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
506 |
+
[2024-08-21 21:09:05,802][00286] Avg episode reward: [(0, '4.540')]
|
507 |
+
[2024-08-21 21:09:10,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3823.1, 300 sec: 3707.2). Total num frames: 2482176. Throughput: 0: 935.7. Samples: 620586. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
508 |
+
[2024-08-21 21:09:10,795][00286] Avg episode reward: [(0, '4.610')]
|
509 |
+
[2024-08-21 21:09:15,295][03214] Updated weights for policy 0, policy_version 610 (0.0035)
|
510 |
+
[2024-08-21 21:09:15,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3693.4). Total num frames: 2498560. Throughput: 0: 967.1. Samples: 624016. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
511 |
+
[2024-08-21 21:09:15,796][00286] Avg episode reward: [(0, '4.600')]
|
512 |
+
[2024-08-21 21:09:20,793][00286] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3679.5). Total num frames: 2510848. Throughput: 0: 908.8. Samples: 627930. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
513 |
+
[2024-08-21 21:09:20,795][00286] Avg episode reward: [(0, '4.601')]
|
514 |
+
[2024-08-21 21:09:20,808][03197] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000613_2510848.pth...
|
515 |
+
[2024-08-21 21:09:20,936][03197] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000396_1622016.pth
|
516 |
+
[2024-08-21 21:09:25,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3693.3). Total num frames: 2531328. Throughput: 0: 913.4. Samples: 633880. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
517 |
+
[2024-08-21 21:09:25,797][00286] Avg episode reward: [(0, '4.680')]
|
518 |
+
[2024-08-21 21:09:26,813][03214] Updated weights for policy 0, policy_version 620 (0.0027)
|
519 |
+
[2024-08-21 21:09:30,793][00286] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3693.3). Total num frames: 2555904. Throughput: 0: 942.4. Samples: 637172. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
520 |
+
[2024-08-21 21:09:30,795][00286] Avg episode reward: [(0, '4.894')]
|
521 |
+
[2024-08-21 21:09:35,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3693.3). Total num frames: 2568192. Throughput: 0: 925.6. Samples: 642110. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
522 |
+
[2024-08-21 21:09:35,795][00286] Avg episode reward: [(0, '4.763')]
|
523 |
+
[2024-08-21 21:09:39,152][03214] Updated weights for policy 0, policy_version 630 (0.0018)
|
524 |
+
[2024-08-21 21:09:40,793][00286] Fps is (10 sec: 2867.2, 60 sec: 3618.1, 300 sec: 3693.3). Total num frames: 2584576. Throughput: 0: 884.7. Samples: 646988. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
525 |
+
[2024-08-21 21:09:40,800][00286] Avg episode reward: [(0, '4.808')]
|
526 |
+
[2024-08-21 21:09:45,793][00286] Fps is (10 sec: 4095.9, 60 sec: 3754.6, 300 sec: 3707.2). Total num frames: 2609152. Throughput: 0: 903.5. Samples: 650192. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
527 |
+
[2024-08-21 21:09:45,798][00286] Avg episode reward: [(0, '4.930')]
|
528 |
+
[2024-08-21 21:09:48,756][03214] Updated weights for policy 0, policy_version 640 (0.0022)
|
529 |
+
[2024-08-21 21:09:50,793][00286] Fps is (10 sec: 4096.1, 60 sec: 3686.4, 300 sec: 3693.4). Total num frames: 2625536. Throughput: 0: 935.4. Samples: 656286. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
530 |
+
[2024-08-21 21:09:50,796][00286] Avg episode reward: [(0, '4.789')]
|
531 |
+
[2024-08-21 21:09:55,793][00286] Fps is (10 sec: 2867.3, 60 sec: 3549.9, 300 sec: 3693.3). Total num frames: 2637824. Throughput: 0: 885.9. Samples: 660450. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
532 |
+
[2024-08-21 21:09:55,794][00286] Avg episode reward: [(0, '4.819')]
|
533 |
+
[2024-08-21 21:10:00,515][03214] Updated weights for policy 0, policy_version 650 (0.0017)
|
534 |
+
[2024-08-21 21:10:00,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3707.2). Total num frames: 2662400. Throughput: 0: 881.6. Samples: 663688. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
|
535 |
+
[2024-08-21 21:10:00,795][00286] Avg episode reward: [(0, '5.059')]
|
536 |
+
[2024-08-21 21:10:00,812][03197] Saving new best policy, reward=5.059!
|
537 |
+
[2024-08-21 21:10:05,795][00286] Fps is (10 sec: 4504.8, 60 sec: 3754.6, 300 sec: 3707.2). Total num frames: 2682880. Throughput: 0: 940.6. Samples: 670258. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
538 |
+
[2024-08-21 21:10:05,799][00286] Avg episode reward: [(0, '4.912')]
|
539 |
+
[2024-08-21 21:10:10,797][00286] Fps is (10 sec: 3275.6, 60 sec: 3549.6, 300 sec: 3707.2). Total num frames: 2695168. Throughput: 0: 906.5. Samples: 674678. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
540 |
+
[2024-08-21 21:10:10,801][00286] Avg episode reward: [(0, '4.945')]
|
541 |
+
[2024-08-21 21:10:12,973][03214] Updated weights for policy 0, policy_version 660 (0.0036)
|
542 |
+
[2024-08-21 21:10:15,793][00286] Fps is (10 sec: 3277.4, 60 sec: 3618.1, 300 sec: 3707.3). Total num frames: 2715648. Throughput: 0: 883.0. Samples: 676906. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
543 |
+
[2024-08-21 21:10:15,798][00286] Avg episode reward: [(0, '4.805')]
|
544 |
+
[2024-08-21 21:10:20,793][00286] Fps is (10 sec: 4097.4, 60 sec: 3754.7, 300 sec: 3707.2). Total num frames: 2736128. Throughput: 0: 922.4. Samples: 683618. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
545 |
+
[2024-08-21 21:10:20,795][00286] Avg episode reward: [(0, '4.758')]
|
546 |
+
[2024-08-21 21:10:21,931][03214] Updated weights for policy 0, policy_version 670 (0.0026)
|
547 |
+
[2024-08-21 21:10:25,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3707.2). Total num frames: 2752512. Throughput: 0: 941.8. Samples: 689368. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
548 |
+
[2024-08-21 21:10:25,795][00286] Avg episode reward: [(0, '4.566')]
|
549 |
+
[2024-08-21 21:10:30,793][00286] Fps is (10 sec: 3276.9, 60 sec: 3549.9, 300 sec: 3707.2). Total num frames: 2768896. Throughput: 0: 916.2. Samples: 691422. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
550 |
+
[2024-08-21 21:10:30,798][00286] Avg episode reward: [(0, '4.422')]
|
551 |
+
[2024-08-21 21:10:33,866][03214] Updated weights for policy 0, policy_version 680 (0.0029)
|
552 |
+
[2024-08-21 21:10:35,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3721.1). Total num frames: 2793472. Throughput: 0: 915.0. Samples: 697460. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
553 |
+
[2024-08-21 21:10:35,794][00286] Avg episode reward: [(0, '4.577')]
|
554 |
+
[2024-08-21 21:10:40,794][00286] Fps is (10 sec: 4505.1, 60 sec: 3822.9, 300 sec: 3721.1). Total num frames: 2813952. Throughput: 0: 967.4. Samples: 703982. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
555 |
+
[2024-08-21 21:10:40,801][00286] Avg episode reward: [(0, '4.717')]
|
556 |
+
[2024-08-21 21:10:44,755][03214] Updated weights for policy 0, policy_version 690 (0.0027)
|
557 |
+
[2024-08-21 21:10:45,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3618.2, 300 sec: 3707.2). Total num frames: 2826240. Throughput: 0: 939.6. Samples: 705968. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
558 |
+
[2024-08-21 21:10:45,796][00286] Avg episode reward: [(0, '4.850')]
|
559 |
+
[2024-08-21 21:10:50,793][00286] Fps is (10 sec: 3277.2, 60 sec: 3686.4, 300 sec: 3721.1). Total num frames: 2846720. Throughput: 0: 901.9. Samples: 710842. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
560 |
+
[2024-08-21 21:10:50,799][00286] Avg episode reward: [(0, '4.756')]
|
561 |
+
[2024-08-21 21:10:55,197][03214] Updated weights for policy 0, policy_version 700 (0.0031)
|
562 |
+
[2024-08-21 21:10:55,796][00286] Fps is (10 sec: 4094.9, 60 sec: 3822.8, 300 sec: 3707.2). Total num frames: 2867200. Throughput: 0: 952.4. Samples: 717534. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
563 |
+
[2024-08-21 21:10:55,798][00286] Avg episode reward: [(0, '4.786')]
|
564 |
+
[2024-08-21 21:11:00,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3707.2). Total num frames: 2883584. Throughput: 0: 969.8. Samples: 720548. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
565 |
+
[2024-08-21 21:11:00,799][00286] Avg episode reward: [(0, '4.701')]
|
566 |
+
[2024-08-21 21:11:05,793][00286] Fps is (10 sec: 3277.7, 60 sec: 3618.2, 300 sec: 3721.1). Total num frames: 2899968. Throughput: 0: 913.9. Samples: 724742. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
567 |
+
[2024-08-21 21:11:05,795][00286] Avg episode reward: [(0, '4.599')]
|
568 |
+
[2024-08-21 21:11:07,107][03214] Updated weights for policy 0, policy_version 710 (0.0037)
|
569 |
+
[2024-08-21 21:11:10,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3754.9, 300 sec: 3707.2). Total num frames: 2920448. Throughput: 0: 927.6. Samples: 731112. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
570 |
+
[2024-08-21 21:11:10,807][00286] Avg episode reward: [(0, '4.587')]
|
571 |
+
[2024-08-21 21:11:15,793][00286] Fps is (10 sec: 4095.9, 60 sec: 3754.7, 300 sec: 3707.2). Total num frames: 2940928. Throughput: 0: 955.7. Samples: 734428. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
572 |
+
[2024-08-21 21:11:15,795][00286] Avg episode reward: [(0, '4.558')]
|
573 |
+
[2024-08-21 21:11:17,484][03214] Updated weights for policy 0, policy_version 720 (0.0035)
|
574 |
+
[2024-08-21 21:11:20,795][00286] Fps is (10 sec: 3685.7, 60 sec: 3686.3, 300 sec: 3721.1). Total num frames: 2957312. Throughput: 0: 925.8. Samples: 739124. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
575 |
+
[2024-08-21 21:11:20,797][00286] Avg episode reward: [(0, '4.591')]
|
576 |
+
[2024-08-21 21:11:20,809][03197] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000722_2957312.pth...
|
577 |
+
[2024-08-21 21:11:20,971][03197] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000505_2068480.pth
|
578 |
+
[2024-08-21 21:11:25,793][00286] Fps is (10 sec: 3276.9, 60 sec: 3686.4, 300 sec: 3707.2). Total num frames: 2973696. Throughput: 0: 897.8. Samples: 744384. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
579 |
+
[2024-08-21 21:11:25,798][00286] Avg episode reward: [(0, '4.473')]
|
580 |
+
[2024-08-21 21:11:28,768][03214] Updated weights for policy 0, policy_version 730 (0.0023)
|
581 |
+
[2024-08-21 21:11:30,793][00286] Fps is (10 sec: 4096.8, 60 sec: 3822.9, 300 sec: 3707.2). Total num frames: 2998272. Throughput: 0: 925.9. Samples: 747632. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
582 |
+
[2024-08-21 21:11:30,797][00286] Avg episode reward: [(0, '4.562')]
|
583 |
+
[2024-08-21 21:11:35,797][00286] Fps is (10 sec: 4094.4, 60 sec: 3686.2, 300 sec: 3707.2). Total num frames: 3014656. Throughput: 0: 944.4. Samples: 753342. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
584 |
+
[2024-08-21 21:11:35,807][00286] Avg episode reward: [(0, '4.714')]
|
585 |
+
[2024-08-21 21:11:40,793][00286] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3693.4). Total num frames: 3026944. Throughput: 0: 891.5. Samples: 757650. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
586 |
+
[2024-08-21 21:11:40,795][00286] Avg episode reward: [(0, '4.550')]
|
587 |
+
[2024-08-21 21:11:40,871][03214] Updated weights for policy 0, policy_version 740 (0.0032)
|
588 |
+
[2024-08-21 21:11:45,793][00286] Fps is (10 sec: 3687.8, 60 sec: 3754.7, 300 sec: 3707.2). Total num frames: 3051520. Throughput: 0: 899.3. Samples: 761018. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
589 |
+
[2024-08-21 21:11:45,801][00286] Avg episode reward: [(0, '4.371')]
|
590 |
+
[2024-08-21 21:11:50,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3693.3). Total num frames: 3067904. Throughput: 0: 947.3. Samples: 767372. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
591 |
+
[2024-08-21 21:11:50,796][00286] Avg episode reward: [(0, '4.738')]
|
592 |
+
[2024-08-21 21:11:50,883][03214] Updated weights for policy 0, policy_version 750 (0.0025)
|
593 |
+
[2024-08-21 21:11:55,793][00286] Fps is (10 sec: 2867.2, 60 sec: 3550.0, 300 sec: 3693.4). Total num frames: 3080192. Throughput: 0: 890.7. Samples: 771194. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
594 |
+
[2024-08-21 21:11:55,799][00286] Avg episode reward: [(0, '4.803')]
|
595 |
+
[2024-08-21 21:12:00,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3707.2). Total num frames: 3104768. Throughput: 0: 879.1. Samples: 773986. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
596 |
+
[2024-08-21 21:12:00,799][00286] Avg episode reward: [(0, '4.572')]
|
597 |
+
[2024-08-21 21:12:02,602][03214] Updated weights for policy 0, policy_version 760 (0.0035)
|
598 |
+
[2024-08-21 21:12:05,793][00286] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3693.3). Total num frames: 3125248. Throughput: 0: 925.5. Samples: 780768. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
599 |
+
[2024-08-21 21:12:05,799][00286] Avg episode reward: [(0, '4.720')]
|
600 |
+
[2024-08-21 21:12:10,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3693.3). Total num frames: 3141632. Throughput: 0: 918.4. Samples: 785712. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
601 |
+
[2024-08-21 21:12:10,804][00286] Avg episode reward: [(0, '4.939')]
|
602 |
+
[2024-08-21 21:12:14,962][03214] Updated weights for policy 0, policy_version 770 (0.0037)
|
603 |
+
[2024-08-21 21:12:15,793][00286] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3679.5). Total num frames: 3153920. Throughput: 0: 889.9. Samples: 787678. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
604 |
+
[2024-08-21 21:12:15,798][00286] Avg episode reward: [(0, '4.696')]
|
605 |
+
[2024-08-21 21:12:20,793][00286] Fps is (10 sec: 3686.5, 60 sec: 3686.5, 300 sec: 3693.3). Total num frames: 3178496. Throughput: 0: 898.4. Samples: 793768. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
606 |
+
[2024-08-21 21:12:20,798][00286] Avg episode reward: [(0, '4.615')]
|
607 |
+
[2024-08-21 21:12:24,856][03214] Updated weights for policy 0, policy_version 780 (0.0018)
|
608 |
+
[2024-08-21 21:12:25,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3679.5). Total num frames: 3194880. Throughput: 0: 931.6. Samples: 799570. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
609 |
+
[2024-08-21 21:12:25,801][00286] Avg episode reward: [(0, '4.665')]
|
610 |
+
[2024-08-21 21:12:30,796][00286] Fps is (10 sec: 2866.3, 60 sec: 3481.4, 300 sec: 3679.4). Total num frames: 3207168. Throughput: 0: 896.4. Samples: 801358. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
611 |
+
[2024-08-21 21:12:30,807][00286] Avg episode reward: [(0, '4.655')]
|
612 |
+
[2024-08-21 21:12:35,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3550.1, 300 sec: 3679.5). Total num frames: 3227648. Throughput: 0: 864.5. Samples: 806274. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
613 |
+
[2024-08-21 21:12:35,795][00286] Avg episode reward: [(0, '4.482')]
|
614 |
+
[2024-08-21 21:12:37,372][03214] Updated weights for policy 0, policy_version 790 (0.0026)
|
615 |
+
[2024-08-21 21:12:40,793][00286] Fps is (10 sec: 4097.3, 60 sec: 3686.4, 300 sec: 3665.6). Total num frames: 3248128. Throughput: 0: 917.4. Samples: 812478. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
616 |
+
[2024-08-21 21:12:40,795][00286] Avg episode reward: [(0, '4.408')]
|
617 |
+
[2024-08-21 21:12:45,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3651.7). Total num frames: 3260416. Throughput: 0: 910.9. Samples: 814978. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
618 |
+
[2024-08-21 21:12:45,799][00286] Avg episode reward: [(0, '4.535')]
|
619 |
+
[2024-08-21 21:12:49,950][03214] Updated weights for policy 0, policy_version 800 (0.0047)
|
620 |
+
[2024-08-21 21:12:50,793][00286] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3651.7). Total num frames: 3276800. Throughput: 0: 848.5. Samples: 818952. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
621 |
+
[2024-08-21 21:12:50,802][00286] Avg episode reward: [(0, '4.750')]
|
622 |
+
[2024-08-21 21:12:55,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 3301376. Throughput: 0: 881.3. Samples: 825368. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
623 |
+
[2024-08-21 21:12:55,800][00286] Avg episode reward: [(0, '4.613')]
|
624 |
+
[2024-08-21 21:13:00,319][03214] Updated weights for policy 0, policy_version 810 (0.0026)
|
625 |
+
[2024-08-21 21:13:00,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3651.7). Total num frames: 3317760. Throughput: 0: 906.9. Samples: 828488. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
626 |
+
[2024-08-21 21:13:00,800][00286] Avg episode reward: [(0, '4.649')]
|
627 |
+
[2024-08-21 21:13:05,793][00286] Fps is (10 sec: 2867.2, 60 sec: 3413.3, 300 sec: 3651.7). Total num frames: 3330048. Throughput: 0: 860.4. Samples: 832488. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
628 |
+
[2024-08-21 21:13:05,799][00286] Avg episode reward: [(0, '4.658')]
|
629 |
+
[2024-08-21 21:13:10,793][00286] Fps is (10 sec: 3276.9, 60 sec: 3481.6, 300 sec: 3637.8). Total num frames: 3350528. Throughput: 0: 853.2. Samples: 837962. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
630 |
+
[2024-08-21 21:13:10,800][00286] Avg episode reward: [(0, '4.678')]
|
631 |
+
[2024-08-21 21:13:12,480][03214] Updated weights for policy 0, policy_version 820 (0.0018)
|
632 |
+
[2024-08-21 21:13:15,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3637.8). Total num frames: 3371008. Throughput: 0: 884.1. Samples: 841138. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
633 |
+
[2024-08-21 21:13:15,799][00286] Avg episode reward: [(0, '4.642')]
|
634 |
+
[2024-08-21 21:13:20,793][00286] Fps is (10 sec: 3276.7, 60 sec: 3413.3, 300 sec: 3637.8). Total num frames: 3383296. Throughput: 0: 889.0. Samples: 846280. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
635 |
+
[2024-08-21 21:13:20,798][00286] Avg episode reward: [(0, '4.712')]
|
636 |
+
[2024-08-21 21:13:20,816][03197] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000826_3383296.pth...
|
637 |
+
[2024-08-21 21:13:20,999][03197] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000613_2510848.pth
|
638 |
+
[2024-08-21 21:13:24,744][03214] Updated weights for policy 0, policy_version 830 (0.0032)
|
639 |
+
[2024-08-21 21:13:25,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3651.7). Total num frames: 3403776. Throughput: 0: 859.2. Samples: 851140. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
640 |
+
[2024-08-21 21:13:25,804][00286] Avg episode reward: [(0, '5.040')]
|
641 |
+
[2024-08-21 21:13:30,793][00286] Fps is (10 sec: 4096.1, 60 sec: 3618.3, 300 sec: 3637.8). Total num frames: 3424256. Throughput: 0: 875.2. Samples: 854364. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
642 |
+
[2024-08-21 21:13:30,800][00286] Avg episode reward: [(0, '4.984')]
|
643 |
+
[2024-08-21 21:13:34,498][03214] Updated weights for policy 0, policy_version 840 (0.0020)
|
644 |
+
[2024-08-21 21:13:35,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3637.8). Total num frames: 3440640. Throughput: 0: 925.1. Samples: 860582. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
645 |
+
[2024-08-21 21:13:35,798][00286] Avg episode reward: [(0, '4.495')]
|
646 |
+
[2024-08-21 21:13:40,793][00286] Fps is (10 sec: 3276.7, 60 sec: 3481.6, 300 sec: 3637.8). Total num frames: 3457024. Throughput: 0: 870.1. Samples: 864522. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
647 |
+
[2024-08-21 21:13:40,797][00286] Avg episode reward: [(0, '4.425')]
|
648 |
+
[2024-08-21 21:13:45,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3637.8). Total num frames: 3477504. Throughput: 0: 870.3. Samples: 867652. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
649 |
+
[2024-08-21 21:13:45,796][00286] Avg episode reward: [(0, '4.404')]
|
650 |
+
[2024-08-21 21:13:46,212][03214] Updated weights for policy 0, policy_version 850 (0.0023)
|
651 |
+
[2024-08-21 21:13:50,794][00286] Fps is (10 sec: 4505.4, 60 sec: 3754.6, 300 sec: 3651.7). Total num frames: 3502080. Throughput: 0: 930.2. Samples: 874348. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
652 |
+
[2024-08-21 21:13:50,796][00286] Avg episode reward: [(0, '4.561')]
|
653 |
+
[2024-08-21 21:13:55,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3637.8). Total num frames: 3514368. Throughput: 0: 913.1. Samples: 879050. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
654 |
+
[2024-08-21 21:13:55,795][00286] Avg episode reward: [(0, '4.639')]
|
655 |
+
[2024-08-21 21:13:58,297][03214] Updated weights for policy 0, policy_version 860 (0.0015)
|
656 |
+
[2024-08-21 21:14:00,793][00286] Fps is (10 sec: 2867.4, 60 sec: 3549.9, 300 sec: 3637.8). Total num frames: 3530752. Throughput: 0: 890.7. Samples: 881220. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
657 |
+
[2024-08-21 21:14:00,799][00286] Avg episode reward: [(0, '4.772')]
|
658 |
+
[2024-08-21 21:14:05,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3637.8). Total num frames: 3555328. Throughput: 0: 926.4. Samples: 887968. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
659 |
+
[2024-08-21 21:14:05,801][00286] Avg episode reward: [(0, '4.756')]
|
660 |
+
[2024-08-21 21:14:07,343][03214] Updated weights for policy 0, policy_version 870 (0.0020)
|
661 |
+
[2024-08-21 21:14:10,793][00286] Fps is (10 sec: 4095.8, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 3571712. Throughput: 0: 945.3. Samples: 893680. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
662 |
+
[2024-08-21 21:14:10,796][00286] Avg episode reward: [(0, '4.483')]
|
663 |
+
[2024-08-21 21:14:15,793][00286] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3637.8). Total num frames: 3584000. Throughput: 0: 918.3. Samples: 895688. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
664 |
+
[2024-08-21 21:14:15,795][00286] Avg episode reward: [(0, '4.528')]
|
665 |
+
[2024-08-21 21:14:19,349][03214] Updated weights for policy 0, policy_version 880 (0.0028)
|
666 |
+
[2024-08-21 21:14:20,793][00286] Fps is (10 sec: 3686.6, 60 sec: 3754.7, 300 sec: 3651.7). Total num frames: 3608576. Throughput: 0: 911.7. Samples: 901608. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
667 |
+
[2024-08-21 21:14:20,795][00286] Avg episode reward: [(0, '4.662')]
|
668 |
+
[2024-08-21 21:14:25,793][00286] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3637.8). Total num frames: 3629056. Throughput: 0: 971.1. Samples: 908222. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
669 |
+
[2024-08-21 21:14:25,802][00286] Avg episode reward: [(0, '4.628')]
|
670 |
+
[2024-08-21 21:14:30,798][00286] Fps is (10 sec: 3275.3, 60 sec: 3617.8, 300 sec: 3637.7). Total num frames: 3641344. Throughput: 0: 945.9. Samples: 910224. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
671 |
+
[2024-08-21 21:14:30,804][00286] Avg episode reward: [(0, '4.664')]
|
672 |
+
[2024-08-21 21:14:30,989][03214] Updated weights for policy 0, policy_version 890 (0.0023)
|
673 |
+
[2024-08-21 21:14:35,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 3661824. Throughput: 0: 901.5. Samples: 914914. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
674 |
+
[2024-08-21 21:14:35,800][00286] Avg episode reward: [(0, '4.649')]
|
675 |
+
[2024-08-21 21:14:40,793][00286] Fps is (10 sec: 4097.9, 60 sec: 3754.7, 300 sec: 3637.8). Total num frames: 3682304. Throughput: 0: 936.9. Samples: 921212. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
676 |
+
[2024-08-21 21:14:40,795][00286] Avg episode reward: [(0, '4.534')]
|
677 |
+
[2024-08-21 21:14:41,179][03214] Updated weights for policy 0, policy_version 900 (0.0024)
|
678 |
+
[2024-08-21 21:14:45,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 3698688. Throughput: 0: 955.1. Samples: 924200. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
679 |
+
[2024-08-21 21:14:45,800][00286] Avg episode reward: [(0, '4.441')]
|
680 |
+
[2024-08-21 21:14:50,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3651.7). Total num frames: 3715072. Throughput: 0: 895.2. Samples: 928254. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
681 |
+
[2024-08-21 21:14:50,795][00286] Avg episode reward: [(0, '4.296')]
|
682 |
+
[2024-08-21 21:14:53,311][03214] Updated weights for policy 0, policy_version 910 (0.0033)
|
683 |
+
[2024-08-21 21:14:55,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 3735552. Throughput: 0: 908.8. Samples: 934576. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
684 |
+
[2024-08-21 21:14:55,797][00286] Avg episode reward: [(0, '4.250')]
|
685 |
+
[2024-08-21 21:15:00,793][00286] Fps is (10 sec: 4095.9, 60 sec: 3754.6, 300 sec: 3637.8). Total num frames: 3756032. Throughput: 0: 939.5. Samples: 937968. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
686 |
+
[2024-08-21 21:15:00,797][00286] Avg episode reward: [(0, '4.436')]
|
687 |
+
[2024-08-21 21:15:04,096][03214] Updated weights for policy 0, policy_version 920 (0.0021)
|
688 |
+
[2024-08-21 21:15:05,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3651.7). Total num frames: 3772416. Throughput: 0: 916.1. Samples: 942832. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
689 |
+
[2024-08-21 21:15:05,797][00286] Avg episode reward: [(0, '4.666')]
|
690 |
+
[2024-08-21 21:15:10,793][00286] Fps is (10 sec: 3276.9, 60 sec: 3618.2, 300 sec: 3637.8). Total num frames: 3788800. Throughput: 0: 888.4. Samples: 948202. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
691 |
+
[2024-08-21 21:15:10,795][00286] Avg episode reward: [(0, '4.793')]
|
692 |
+
[2024-08-21 21:15:14,656][03214] Updated weights for policy 0, policy_version 930 (0.0031)
|
693 |
+
[2024-08-21 21:15:15,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3651.7). Total num frames: 3813376. Throughput: 0: 915.7. Samples: 951426. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
694 |
+
[2024-08-21 21:15:15,798][00286] Avg episode reward: [(0, '4.313')]
|
695 |
+
[2024-08-21 21:15:20,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 3829760. Throughput: 0: 939.6. Samples: 957196. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
696 |
+
[2024-08-21 21:15:20,797][00286] Avg episode reward: [(0, '4.316')]
|
697 |
+
[2024-08-21 21:15:20,808][03197] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000935_3829760.pth...
|
698 |
+
[2024-08-21 21:15:20,966][03197] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000722_2957312.pth
|
699 |
+
[2024-08-21 21:15:25,793][00286] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3651.7). Total num frames: 3846144. Throughput: 0: 897.9. Samples: 961616. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
700 |
+
[2024-08-21 21:15:25,798][00286] Avg episode reward: [(0, '4.450')]
|
701 |
+
[2024-08-21 21:15:26,661][03214] Updated weights for policy 0, policy_version 940 (0.0034)
|
702 |
+
[2024-08-21 21:15:30,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3755.0, 300 sec: 3637.8). Total num frames: 3866624. Throughput: 0: 907.5. Samples: 965036. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
703 |
+
[2024-08-21 21:15:30,796][00286] Avg episode reward: [(0, '4.609')]
|
704 |
+
[2024-08-21 21:15:35,796][00286] Fps is (10 sec: 4094.9, 60 sec: 3754.5, 300 sec: 3637.8). Total num frames: 3887104. Throughput: 0: 966.4. Samples: 971746. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
705 |
+
[2024-08-21 21:15:35,798][00286] Avg episode reward: [(0, '4.449')]
|
706 |
+
[2024-08-21 21:15:36,454][03214] Updated weights for policy 0, policy_version 950 (0.0021)
|
707 |
+
[2024-08-21 21:15:40,796][00286] Fps is (10 sec: 3275.9, 60 sec: 3618.0, 300 sec: 3637.8). Total num frames: 3899392. Throughput: 0: 918.8. Samples: 975924. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
708 |
+
[2024-08-21 21:15:40,798][00286] Avg episode reward: [(0, '4.630')]
|
709 |
+
[2024-08-21 21:15:45,793][00286] Fps is (10 sec: 3277.7, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 3919872. Throughput: 0: 901.2. Samples: 978520. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
710 |
+
[2024-08-21 21:15:45,795][00286] Avg episode reward: [(0, '4.917')]
|
711 |
+
[2024-08-21 21:15:47,792][03214] Updated weights for policy 0, policy_version 960 (0.0026)
|
712 |
+
[2024-08-21 21:15:50,793][00286] Fps is (10 sec: 4506.8, 60 sec: 3822.9, 300 sec: 3651.7). Total num frames: 3944448. Throughput: 0: 942.9. Samples: 985264. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
713 |
+
[2024-08-21 21:15:50,795][00286] Avg episode reward: [(0, '4.799')]
|
714 |
+
[2024-08-21 21:15:55,793][00286] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3651.7). Total num frames: 3960832. Throughput: 0: 937.7. Samples: 990400. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
715 |
+
[2024-08-21 21:15:55,795][00286] Avg episode reward: [(0, '4.624')]
|
716 |
+
[2024-08-21 21:15:59,788][03214] Updated weights for policy 0, policy_version 970 (0.0030)
|
717 |
+
[2024-08-21 21:16:00,793][00286] Fps is (10 sec: 3276.7, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 3977216. Throughput: 0: 911.4. Samples: 992438. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
718 |
+
[2024-08-21 21:16:00,803][00286] Avg episode reward: [(0, '4.561')]
|
719 |
+
[2024-08-21 21:16:05,793][00286] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3651.7). Total num frames: 3997696. Throughput: 0: 923.9. Samples: 998770. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
720 |
+
[2024-08-21 21:16:05,798][00286] Avg episode reward: [(0, '4.624')]
|
721 |
+
[2024-08-21 21:16:07,286][03197] Stopping Batcher_0...
|
722 |
+
[2024-08-21 21:16:07,286][03197] Loop batcher_evt_loop terminating...
|
723 |
+
[2024-08-21 21:16:07,293][03197] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
|
724 |
+
[2024-08-21 21:16:07,296][00286] Component Batcher_0 stopped!
|
725 |
+
[2024-08-21 21:16:07,341][03214] Weights refcount: 2 0
|
726 |
+
[2024-08-21 21:16:07,344][03214] Stopping InferenceWorker_p0-w0...
|
727 |
+
[2024-08-21 21:16:07,345][03214] Loop inference_proc0-0_evt_loop terminating...
|
728 |
+
[2024-08-21 21:16:07,344][00286] Component InferenceWorker_p0-w0 stopped!
|
729 |
+
[2024-08-21 21:16:07,450][03197] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000826_3383296.pth
|
730 |
+
[2024-08-21 21:16:07,461][03197] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
|
731 |
+
[2024-08-21 21:16:07,702][03197] Stopping LearnerWorker_p0...
|
732 |
+
[2024-08-21 21:16:07,707][03197] Loop learner_proc0_evt_loop terminating...
|
733 |
+
[2024-08-21 21:16:07,704][03215] Stopping RolloutWorker_w0...
|
734 |
+
[2024-08-21 21:16:07,706][00286] Component LearnerWorker_p0 stopped!
|
735 |
+
[2024-08-21 21:16:07,711][03215] Loop rollout_proc0_evt_loop terminating...
|
736 |
+
[2024-08-21 21:16:07,721][00286] Component RolloutWorker_w0 stopped!
|
737 |
+
[2024-08-21 21:16:07,733][00286] Component RolloutWorker_w2 stopped!
|
738 |
+
[2024-08-21 21:16:07,737][03216] Stopping RolloutWorker_w2...
|
739 |
+
[2024-08-21 21:16:07,740][03216] Loop rollout_proc2_evt_loop terminating...
|
740 |
+
[2024-08-21 21:16:07,756][03217] Stopping RolloutWorker_w1...
|
741 |
+
[2024-08-21 21:16:07,757][03217] Loop rollout_proc1_evt_loop terminating...
|
742 |
+
[2024-08-21 21:16:07,756][00286] Component RolloutWorker_w1 stopped!
|
743 |
+
[2024-08-21 21:16:07,774][03219] Stopping RolloutWorker_w5...
|
744 |
+
[2024-08-21 21:16:07,774][00286] Component RolloutWorker_w5 stopped!
|
745 |
+
[2024-08-21 21:16:07,776][03219] Loop rollout_proc5_evt_loop terminating...
|
746 |
+
[2024-08-21 21:16:07,785][00286] Component RolloutWorker_w7 stopped!
|
747 |
+
[2024-08-21 21:16:07,788][03221] Stopping RolloutWorker_w7...
|
748 |
+
[2024-08-21 21:16:07,794][00286] Component RolloutWorker_w3 stopped!
|
749 |
+
[2024-08-21 21:16:07,798][03218] Stopping RolloutWorker_w3...
|
750 |
+
[2024-08-21 21:16:07,790][03221] Loop rollout_proc7_evt_loop terminating...
|
751 |
+
[2024-08-21 21:16:07,799][03218] Loop rollout_proc3_evt_loop terminating...
|
752 |
+
[2024-08-21 21:16:07,810][03222] Stopping RolloutWorker_w6...
|
753 |
+
[2024-08-21 21:16:07,810][00286] Component RolloutWorker_w6 stopped!
|
754 |
+
[2024-08-21 21:16:07,815][03222] Loop rollout_proc6_evt_loop terminating...
|
755 |
+
[2024-08-21 21:16:07,888][03220] Stopping RolloutWorker_w4...
|
756 |
+
[2024-08-21 21:16:07,888][00286] Component RolloutWorker_w4 stopped!
|
757 |
+
[2024-08-21 21:16:07,892][03220] Loop rollout_proc4_evt_loop terminating...
|
758 |
+
[2024-08-21 21:16:07,892][00286] Waiting for process learner_proc0 to stop...
|
759 |
+
[2024-08-21 21:16:09,260][00286] Waiting for process inference_proc0-0 to join...
|
760 |
+
[2024-08-21 21:16:09,267][00286] Waiting for process rollout_proc0 to join...
|
761 |
+
[2024-08-21 21:16:12,192][00286] Waiting for process rollout_proc1 to join...
|
762 |
+
[2024-08-21 21:16:12,196][00286] Waiting for process rollout_proc2 to join...
|
763 |
+
[2024-08-21 21:16:12,200][00286] Waiting for process rollout_proc3 to join...
|
764 |
+
[2024-08-21 21:16:12,205][00286] Waiting for process rollout_proc4 to join...
|
765 |
+
[2024-08-21 21:16:12,210][00286] Waiting for process rollout_proc5 to join...
|
766 |
+
[2024-08-21 21:16:12,214][00286] Waiting for process rollout_proc6 to join...
|
767 |
+
[2024-08-21 21:16:12,220][00286] Waiting for process rollout_proc7 to join...
|
768 |
+
[2024-08-21 21:16:12,225][00286] Batcher 0 profile tree view:
|
769 |
+
batching: 26.8707, releasing_batches: 0.0289
|
770 |
+
[2024-08-21 21:16:12,227][00286] InferenceWorker_p0-w0 profile tree view:
|
771 |
+
wait_policy: 0.0000
|
772 |
+
wait_policy_total: 423.7363
|
773 |
+
update_model: 9.4612
|
774 |
+
weight_update: 0.0030
|
775 |
+
one_step: 0.0058
|
776 |
+
handle_policy_step: 623.2701
|
777 |
+
deserialize: 16.6336, stack: 3.2684, obs_to_device_normalize: 125.7079, forward: 333.4557, send_messages: 30.2082
|
778 |
+
prepare_outputs: 83.4280
|
779 |
+
to_cpu: 47.9837
|
780 |
+
[2024-08-21 21:16:12,230][00286] Learner 0 profile tree view:
|
781 |
+
misc: 0.0052, prepare_batch: 14.6260
|
782 |
+
train: 75.3809
|
783 |
+
epoch_init: 0.0133, minibatch_init: 0.0111, losses_postprocess: 0.6441, kl_divergence: 0.6862, after_optimizer: 34.4669
|
784 |
+
calculate_losses: 27.4056
|
785 |
+
losses_init: 0.0136, forward_head: 1.3108, bptt_initial: 17.9403, tail: 1.1918, advantages_returns: 0.2754, losses: 3.9182
|
786 |
+
bptt: 2.3882
|
787 |
+
bptt_forward_core: 2.2559
|
788 |
+
update: 11.4269
|
789 |
+
clip: 0.9519
|
790 |
+
[2024-08-21 21:16:12,234][00286] RolloutWorker_w0 profile tree view:
|
791 |
+
wait_for_trajectories: 0.3563, enqueue_policy_requests: 104.1912, env_step: 857.6223, overhead: 14.4259, complete_rollouts: 6.8248
|
792 |
+
save_policy_outputs: 22.5305
|
793 |
+
split_output_tensors: 9.1667
|
794 |
+
[2024-08-21 21:16:12,236][00286] RolloutWorker_w7 profile tree view:
|
795 |
+
wait_for_trajectories: 0.3231, enqueue_policy_requests: 105.1923, env_step: 850.3135, overhead: 14.6101, complete_rollouts: 7.4775
|
796 |
+
save_policy_outputs: 22.1450
|
797 |
+
split_output_tensors: 8.8024
|
798 |
+
[2024-08-21 21:16:12,237][00286] Loop Runner_EvtLoop terminating...
|
799 |
+
[2024-08-21 21:16:12,239][00286] Runner profile tree view:
|
800 |
+
main_loop: 1127.0205
|
801 |
+
[2024-08-21 21:16:12,242][00286] Collected {0: 4005888}, FPS: 3554.4
|
802 |
+
[2024-08-21 21:16:12,276][00286] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
|
803 |
+
[2024-08-21 21:16:12,278][00286] Overriding arg 'num_workers' with value 1 passed from command line
|
804 |
+
[2024-08-21 21:16:12,280][00286] Adding new argument 'no_render'=True that is not in the saved config file!
|
805 |
+
[2024-08-21 21:16:12,281][00286] Adding new argument 'save_video'=True that is not in the saved config file!
|
806 |
+
[2024-08-21 21:16:12,283][00286] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
|
807 |
+
[2024-08-21 21:16:12,285][00286] Adding new argument 'video_name'=None that is not in the saved config file!
|
808 |
+
[2024-08-21 21:16:12,287][00286] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
|
809 |
+
[2024-08-21 21:16:12,288][00286] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
|
810 |
+
[2024-08-21 21:16:12,290][00286] Adding new argument 'push_to_hub'=False that is not in the saved config file!
|
811 |
+
[2024-08-21 21:16:12,291][00286] Adding new argument 'hf_repository'=None that is not in the saved config file!
|
812 |
+
[2024-08-21 21:16:12,294][00286] Adding new argument 'policy_index'=0 that is not in the saved config file!
|
813 |
+
[2024-08-21 21:16:12,295][00286] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
|
814 |
+
[2024-08-21 21:16:12,296][00286] Adding new argument 'train_script'=None that is not in the saved config file!
|
815 |
+
[2024-08-21 21:16:12,298][00286] Adding new argument 'enjoy_script'=None that is not in the saved config file!
|
816 |
+
[2024-08-21 21:16:12,300][00286] Using frameskip 1 and render_action_repeat=4 for evaluation
|
817 |
+
[2024-08-21 21:16:12,354][00286] Doom resolution: 160x120, resize resolution: (128, 72)
|
818 |
+
[2024-08-21 21:16:12,360][00286] RunningMeanStd input shape: (3, 72, 128)
|
819 |
+
[2024-08-21 21:16:12,362][00286] RunningMeanStd input shape: (1,)
|
820 |
+
[2024-08-21 21:16:12,392][00286] ConvEncoder: input_channels=3
|
821 |
+
[2024-08-21 21:16:12,579][00286] Conv encoder output size: 512
|
822 |
+
[2024-08-21 21:16:12,581][00286] Policy head output size: 512
|
823 |
+
[2024-08-21 21:16:12,815][00286] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
|
824 |
+
[2024-08-21 21:16:13,622][00286] Num frames 100...
|
825 |
+
[2024-08-21 21:16:13,749][00286] Num frames 200...
|
826 |
+
[2024-08-21 21:16:13,873][00286] Num frames 300...
|
827 |
+
[2024-08-21 21:16:14,038][00286] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
|
828 |
+
[2024-08-21 21:16:14,040][00286] Avg episode reward: 3.840, avg true_objective: 3.840
|
829 |
+
[2024-08-21 21:16:14,064][00286] Num frames 400...
|
830 |
+
[2024-08-21 21:16:14,188][00286] Num frames 500...
|
831 |
+
[2024-08-21 21:16:14,316][00286] Num frames 600...
|
832 |
+
[2024-08-21 21:16:14,446][00286] Num frames 700...
|
833 |
+
[2024-08-21 21:16:14,583][00286] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
|
834 |
+
[2024-08-21 21:16:14,585][00286] Avg episode reward: 3.840, avg true_objective: 3.840
|
835 |
+
[2024-08-21 21:16:14,634][00286] Num frames 800...
|
836 |
+
[2024-08-21 21:16:14,760][00286] Num frames 900...
|
837 |
+
[2024-08-21 21:16:14,895][00286] Num frames 1000...
|
838 |
+
[2024-08-21 21:16:15,030][00286] Num frames 1100...
|
839 |
+
[2024-08-21 21:16:15,151][00286] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
|
840 |
+
[2024-08-21 21:16:15,152][00286] Avg episode reward: 3.840, avg true_objective: 3.840
|
841 |
+
[2024-08-21 21:16:15,215][00286] Num frames 1200...
|
842 |
+
[2024-08-21 21:16:15,343][00286] Num frames 1300...
|
843 |
+
[2024-08-21 21:16:15,470][00286] Num frames 1400...
|
844 |
+
[2024-08-21 21:16:15,598][00286] Num frames 1500...
|
845 |
+
[2024-08-21 21:16:15,699][00286] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
|
846 |
+
[2024-08-21 21:16:15,700][00286] Avg episode reward: 3.840, avg true_objective: 3.840
|
847 |
+
[2024-08-21 21:16:15,783][00286] Num frames 1600...
|
848 |
+
[2024-08-21 21:16:15,905][00286] Num frames 1700...
|
849 |
+
[2024-08-21 21:16:16,032][00286] Num frames 1800...
|
850 |
+
[2024-08-21 21:16:16,156][00286] Num frames 1900...
|
851 |
+
[2024-08-21 21:16:16,235][00286] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
|
852 |
+
[2024-08-21 21:16:16,237][00286] Avg episode reward: 3.840, avg true_objective: 3.840
[2024-08-21 21:16:16,338][00286] Num frames 2000...
[2024-08-21 21:16:16,468][00286] Num frames 2100...
[2024-08-21 21:16:16,589][00286] Num frames 2200...
[2024-08-21 21:16:16,713][00286] Num frames 2300...
[2024-08-21 21:16:16,837][00286] Num frames 2400...
[2024-08-21 21:16:16,935][00286] Avg episode rewards: #0: 4.387, true rewards: #0: 4.053
[2024-08-21 21:16:16,938][00286] Avg episode reward: 4.387, avg true_objective: 4.053
[2024-08-21 21:16:17,031][00286] Num frames 2500...
[2024-08-21 21:16:17,155][00286] Num frames 2600...
[2024-08-21 21:16:17,279][00286] Num frames 2700...
[2024-08-21 21:16:17,413][00286] Num frames 2800...
[2024-08-21 21:16:17,536][00286] Num frames 2900...
[2024-08-21 21:16:17,661][00286] Num frames 3000...
[2024-08-21 21:16:17,766][00286] Avg episode rewards: #0: 5.057, true rewards: #0: 4.343
[2024-08-21 21:16:17,768][00286] Avg episode reward: 5.057, avg true_objective: 4.343
[2024-08-21 21:16:17,840][00286] Num frames 3100...
[2024-08-21 21:16:17,961][00286] Num frames 3200...
[2024-08-21 21:16:18,090][00286] Num frames 3300...
[2024-08-21 21:16:18,211][00286] Num frames 3400...
[2024-08-21 21:16:18,368][00286] Avg episode rewards: #0: 5.110, true rewards: #0: 4.360
[2024-08-21 21:16:18,370][00286] Avg episode reward: 5.110, avg true_objective: 4.360
[2024-08-21 21:16:18,387][00286] Num frames 3500...
[2024-08-21 21:16:18,512][00286] Num frames 3600...
[2024-08-21 21:16:18,632][00286] Num frames 3700...
[2024-08-21 21:16:18,753][00286] Num frames 3800...
[2024-08-21 21:16:18,879][00286] Num frames 3900...
[2024-08-21 21:16:18,978][00286] Avg episode rewards: #0: 5.151, true rewards: #0: 4.373
[2024-08-21 21:16:18,980][00286] Avg episode reward: 5.151, avg true_objective: 4.373
[2024-08-21 21:16:19,065][00286] Num frames 4000...
[2024-08-21 21:16:19,187][00286] Num frames 4100...
[2024-08-21 21:16:19,307][00286] Num frames 4200...
[2024-08-21 21:16:19,441][00286] Num frames 4300...
[2024-08-21 21:16:19,521][00286] Avg episode rewards: #0: 5.020, true rewards: #0: 4.320
[2024-08-21 21:16:19,523][00286] Avg episode reward: 5.020, avg true_objective: 4.320
[2024-08-21 21:16:40,466][00286] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
[2024-08-21 21:16:40,499][00286] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
[2024-08-21 21:16:40,500][00286] Overriding arg 'num_workers' with value 1 passed from command line
[2024-08-21 21:16:40,502][00286] Adding new argument 'no_render'=True that is not in the saved config file!
[2024-08-21 21:16:40,503][00286] Adding new argument 'save_video'=True that is not in the saved config file!
[2024-08-21 21:16:40,505][00286] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
[2024-08-21 21:16:40,506][00286] Adding new argument 'video_name'=None that is not in the saved config file!
[2024-08-21 21:16:40,508][00286] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
[2024-08-21 21:16:40,509][00286] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
[2024-08-21 21:16:40,516][00286] Adding new argument 'push_to_hub'=True that is not in the saved config file!
[2024-08-21 21:16:40,517][00286] Adding new argument 'hf_repository'='fortminors/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
[2024-08-21 21:16:40,518][00286] Adding new argument 'policy_index'=0 that is not in the saved config file!
[2024-08-21 21:16:40,519][00286] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
[2024-08-21 21:16:40,520][00286] Adding new argument 'train_script'=None that is not in the saved config file!
[2024-08-21 21:16:40,521][00286] Adding new argument 'enjoy_script'=None that is not in the saved config file!
[2024-08-21 21:16:40,522][00286] Using frameskip 1 and render_action_repeat=4 for evaluation
[2024-08-21 21:16:40,550][00286] RunningMeanStd input shape: (3, 72, 128)
[2024-08-21 21:16:40,552][00286] RunningMeanStd input shape: (1,)
[2024-08-21 21:16:40,565][00286] ConvEncoder: input_channels=3
[2024-08-21 21:16:40,600][00286] Conv encoder output size: 512
[2024-08-21 21:16:40,602][00286] Policy head output size: 512
[2024-08-21 21:16:40,622][00286] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
[2024-08-21 21:16:41,038][00286] Num frames 100...
[2024-08-21 21:16:41,160][00286] Num frames 200...
[2024-08-21 21:16:41,281][00286] Num frames 300...
[2024-08-21 21:16:41,460][00286] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
[2024-08-21 21:16:41,462][00286] Avg episode reward: 3.840, avg true_objective: 3.840
[2024-08-21 21:16:41,492][00286] Num frames 400...
[2024-08-21 21:16:41,637][00286] Num frames 500...
[2024-08-21 21:16:41,757][00286] Num frames 600...
[2024-08-21 21:16:41,886][00286] Num frames 700...
[2024-08-21 21:16:42,024][00286] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
[2024-08-21 21:16:42,025][00286] Avg episode reward: 3.840, avg true_objective: 3.840
[2024-08-21 21:16:42,068][00286] Num frames 800...
[2024-08-21 21:16:42,187][00286] Num frames 900...
[2024-08-21 21:16:42,311][00286] Num frames 1000...
[2024-08-21 21:16:42,443][00286] Num frames 1100...
[2024-08-21 21:16:42,564][00286] Num frames 1200...
[2024-08-21 21:16:42,713][00286] Avg episode rewards: #0: 4.933, true rewards: #0: 4.267
[2024-08-21 21:16:42,714][00286] Avg episode reward: 4.933, avg true_objective: 4.267
[2024-08-21 21:16:42,743][00286] Num frames 1300...
[2024-08-21 21:16:42,868][00286] Num frames 1400...
[2024-08-21 21:16:42,999][00286] Num frames 1500...
[2024-08-21 21:16:43,121][00286] Num frames 1600...
[2024-08-21 21:16:43,253][00286] Avg episode rewards: #0: 4.660, true rewards: #0: 4.160
[2024-08-21 21:16:43,255][00286] Avg episode reward: 4.660, avg true_objective: 4.160
[2024-08-21 21:16:43,307][00286] Num frames 1700...
[2024-08-21 21:16:43,434][00286] Num frames 1800...
[2024-08-21 21:16:43,552][00286] Num frames 1900...
[2024-08-21 21:16:43,670][00286] Num frames 2000...
[2024-08-21 21:16:43,782][00286] Avg episode rewards: #0: 4.496, true rewards: #0: 4.096
[2024-08-21 21:16:43,784][00286] Avg episode reward: 4.496, avg true_objective: 4.096
[2024-08-21 21:16:43,846][00286] Num frames 2100...
[2024-08-21 21:16:43,975][00286] Num frames 2200...
[2024-08-21 21:16:44,093][00286] Num frames 2300...
[2024-08-21 21:16:44,209][00286] Num frames 2400...
[2024-08-21 21:16:44,384][00286] Avg episode rewards: #0: 4.660, true rewards: #0: 4.160
[2024-08-21 21:16:44,385][00286] Avg episode reward: 4.660, avg true_objective: 4.160
[2024-08-21 21:16:44,395][00286] Num frames 2500...
[2024-08-21 21:16:44,513][00286] Num frames 2600...
[2024-08-21 21:16:44,638][00286] Num frames 2700...
[2024-08-21 21:16:44,758][00286] Num frames 2800...
[2024-08-21 21:16:44,880][00286] Num frames 2900...
[2024-08-21 21:16:44,952][00286] Avg episode rewards: #0: 4.874, true rewards: #0: 4.160
[2024-08-21 21:16:44,954][00286] Avg episode reward: 4.874, avg true_objective: 4.160
[2024-08-21 21:16:45,062][00286] Num frames 3000...
[2024-08-21 21:16:45,184][00286] Num frames 3100...
[2024-08-21 21:16:45,311][00286] Num frames 3200...
[2024-08-21 21:16:45,489][00286] Avg episode rewards: #0: 4.745, true rewards: #0: 4.120
[2024-08-21 21:16:45,491][00286] Avg episode reward: 4.745, avg true_objective: 4.120
[2024-08-21 21:16:45,501][00286] Num frames 3300...
[2024-08-21 21:16:45,621][00286] Num frames 3400...
[2024-08-21 21:16:45,743][00286] Num frames 3500...
[2024-08-21 21:16:45,863][00286] Avg episode rewards: #0: 4.502, true rewards: #0: 3.947
[2024-08-21 21:16:45,866][00286] Avg episode reward: 4.502, avg true_objective: 3.947
[2024-08-21 21:16:45,924][00286] Num frames 3600...
[2024-08-21 21:16:46,053][00286] Num frames 3700...
[2024-08-21 21:16:46,177][00286] Num frames 3800...
[2024-08-21 21:16:46,294][00286] Num frames 3900...
[2024-08-21 21:16:46,427][00286] Num frames 4000...
[2024-08-21 21:16:46,558][00286] Avg episode rewards: #0: 4.764, true rewards: #0: 4.064
[2024-08-21 21:16:46,560][00286] Avg episode reward: 4.764, avg true_objective: 4.064
[2024-08-21 21:17:06,444][00286] Replay video saved to /content/train_dir/default_experiment/replay.mp4!