shreyansjain committed • Commit ee48fb8 • Parent(s): fc62aab

Upload folder using huggingface_hub
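The commit message says the folder was uploaded with huggingface_hub. A minimal sketch of how such a commit is typically produced; the repo_id comes from the push URL in the log below and the folder_path is assumed to be the experiment directory seen there, not a confirmed command:

```python
# Hedged sketch: upload a local training folder to the Hub in one commit.
from huggingface_hub import upload_folder

upload_folder(
    repo_id="shreyansjain/rl_course_vizdoom_health_gathering_supreme",  # from the log's push URL
    folder_path="/content/train_dir/default_experiment",  # assumed local experiment dir
    commit_message="Upload folder using huggingface_hub",
)
```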
.summary/0/events.out.tfevents.1683904688.5a963451f5d6 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edc020094ae3c3181b556ffddd0a65084ec6baf368fcb1c0d9c8f60bbad59a13
+size 460471
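The three added lines are a Git LFS pointer: the repository stores only this version/oid/size stanza while the binary itself lives in LFS storage. A small sketch of reading such a pointer (the helper name is illustrative, not part of any library):

```python
# Parse a Git LFS pointer file like the ones added in this commit.
# The format is simple "key value" lines per the LFS spec URL above.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    assert fields.get("version", "").startswith("https://git-lfs.github.com/spec")
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:edc020094ae3c3181b556ffddd0a65084ec6baf368fcb1c0d9c8f60bbad59a13
size 460471"""
print(parse_lfs_pointer(pointer))  # {'version': ..., 'oid': 'sha256:...', 'size': '460471'}
```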
README.md CHANGED
@@ -15,7 +15,7 @@ model-index:
       type: doom_health_gathering_supreme
     metrics:
     - type: mean_reward
-      value: 10.
+      value: 10.51 +/- 5.44
       name: mean_reward
       verified: false
 ---
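By the usual convention for these model cards (an assumption, not stated in the diff), `10.51 +/- 5.44` is the mean and standard deviation of per-episode rewards from an evaluation run. A sketch of producing that string:

```python
# Format "mean +/- std" over evaluation episode rewards (assumed convention).
import statistics

def format_mean_reward(episode_rewards: list[float]) -> str:
    mean = statistics.mean(episode_rewards)
    std = statistics.pstdev(episode_rewards)
    return f"{mean:.2f} +/- {std:.2f}"

print(format_mean_reward([4.2, 9.8, 15.3, 12.7, 8.9]))  # -> "10.18 +/- 3.74"
```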
checkpoint_p0/best_000001308_5357568_reward_29.826.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a6d72f3ec4f70b9e7d1f66d394f6ece3c4d737089765675b520f193185a8d75
+size 34928806

checkpoint_p0/checkpoint_000001453_5951488.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:49437f88231dfba3134405f30284cfadd9733c3425fee1d3812c56b3b5c0ea53
+size 34929220

checkpoint_p0/checkpoint_000001466_6004736.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aec137ee5e137d50823930ae100c488936583404690b4a3a2df743a1980ddb71
+size 34929220
config.json CHANGED
@@ -65,7 +65,7 @@
     "summaries_use_frameskip": true,
     "heartbeat_interval": 20,
     "heartbeat_reporting_interval": 600,
-    "train_for_env_steps":
+    "train_for_env_steps": 6000000,
     "train_for_seconds": 10000000000,
     "save_every_sec": 120,
     "keep_checkpoints": 2,
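The log below confirms where this change comes from: the saved experiment config is loaded and `train_for_env_steps` is overridden with a value passed on the command line, then written back to config.json. A hedged sketch of that resume-with-override behavior; the function and key handling are illustrative, not Sample Factory's actual code:

```python
# Load a saved experiment config, let command-line values win, mirroring
# "Overriding arg 'train_for_env_steps' with value 6000000" in the log.
import json

def load_config_with_overrides(path: str, cli_overrides: dict) -> dict:
    with open(path) as f:
        cfg = json.load(f)
    for key, value in cli_overrides.items():
        print(f"Overriding arg {key!r} with value {value} passed from command line")
        cfg[key] = value
    return cfg

# cfg = load_config_with_overrides(
#     "/content/train_dir/default_experiment/config.json",
#     {"train_for_env_steps": 6000000},
# )
```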
replay.mp4 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:99987d0db8d28a385b3b3d30a805c2439129a3329af212f7b4cd3e85c7c2dc29
+size 20089901
sf_log.txt CHANGED
@@ -1303,3 +1303,1007 @@ main_loop: 1175.7107
 [2023-05-12 15:16:18,162][00161] Avg episode rewards: #0: 24.767, true rewards: #0: 10.867
 [2023-05-12 15:16:18,164][00161] Avg episode reward: 24.767, avg true_objective: 10.867
 [2023-05-12 15:17:25,860][00161] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
+[2023-05-12 15:17:29,065][00161] The model has been pushed to https://huggingface.co/shreyansjain/rl_course_vizdoom_health_gathering_supreme
+[2023-05-12 15:18:08,375][00161] Environment doom_basic already registered, overwriting...
+[2023-05-12 15:18:08,377][00161] Environment doom_two_colors_easy already registered, overwriting...
+[2023-05-12 15:18:08,378][00161] Environment doom_two_colors_hard already registered, overwriting...
+[2023-05-12 15:18:08,379][00161] Environment doom_dm already registered, overwriting...
+[2023-05-12 15:18:08,380][00161] Environment doom_dwango5 already registered, overwriting...
+[2023-05-12 15:18:08,384][00161] Environment doom_my_way_home_flat_actions already registered, overwriting...
+[2023-05-12 15:18:08,385][00161] Environment doom_defend_the_center_flat_actions already registered, overwriting...
+[2023-05-12 15:18:08,387][00161] Environment doom_my_way_home already registered, overwriting...
+[2023-05-12 15:18:08,388][00161] Environment doom_deadly_corridor already registered, overwriting...
+[2023-05-12 15:18:08,392][00161] Environment doom_defend_the_center already registered, overwriting...
+[2023-05-12 15:18:08,394][00161] Environment doom_defend_the_line already registered, overwriting...
+[2023-05-12 15:18:08,395][00161] Environment doom_health_gathering already registered, overwriting...
+[2023-05-12 15:18:08,396][00161] Environment doom_health_gathering_supreme already registered, overwriting...
+[2023-05-12 15:18:08,398][00161] Environment doom_battle already registered, overwriting...
+[2023-05-12 15:18:08,399][00161] Environment doom_battle2 already registered, overwriting...
+[2023-05-12 15:18:08,401][00161] Environment doom_duel_bots already registered, overwriting...
+[2023-05-12 15:18:08,402][00161] Environment doom_deathmatch_bots already registered, overwriting...
+[2023-05-12 15:18:08,403][00161] Environment doom_duel already registered, overwriting...
+[2023-05-12 15:18:08,404][00161] Environment doom_deathmatch_full already registered, overwriting...
+[2023-05-12 15:18:08,405][00161] Environment doom_benchmark already registered, overwriting...
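The "already registered, overwriting..." lines come from re-running registration in the same session. A hedged sketch of that pattern; the registry dict and register_env function are illustrative, not Sample Factory's code:

```python
# A name-to-factory registry that warns and replaces on re-registration.
import logging

ENV_REGISTRY: dict = {}

def register_env(name: str, make_env) -> None:
    if name in ENV_REGISTRY:
        logging.warning("Environment %s already registered, overwriting...", name)
    ENV_REGISTRY[name] = make_env

register_env("doom_health_gathering_supreme", lambda cfg: ...)
register_env("doom_health_gathering_supreme", lambda cfg: ...)  # triggers the warning
```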
+[2023-05-12 15:18:08,406][00161] register_encoder_factory: <function make_vizdoom_encoder at 0x7f9b6fe4d6c0>
+[2023-05-12 15:18:08,438][00161] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2023-05-12 15:18:08,441][00161] Overriding arg 'train_for_env_steps' with value 6000000 passed from command line
+[2023-05-12 15:18:08,448][00161] Experiment dir /content/train_dir/default_experiment already exists!
+[2023-05-12 15:18:08,450][00161] Resuming existing experiment from /content/train_dir/default_experiment...
+[2023-05-12 15:18:08,452][00161] Weights and Biases integration disabled
+[2023-05-12 15:18:08,455][00161] Environment var CUDA_VISIBLE_DEVICES is 0
+
+[2023-05-12 15:18:09,872][00161] Starting experiment with the following configuration:
+help=False
+algo=APPO
+env=doom_health_gathering_supreme
+experiment=default_experiment
+train_dir=/content/train_dir
+restart_behavior=resume
+device=gpu
+seed=None
+num_policies=1
+async_rl=True
+serial_mode=False
+batched_sampling=False
+num_batches_to_accumulate=2
+worker_num_splits=2
+policy_workers_per_policy=1
+max_policy_lag=1000
+num_workers=8
+num_envs_per_worker=4
+batch_size=1024
+num_batches_per_epoch=1
+num_epochs=1
+rollout=32
+recurrence=32
+shuffle_minibatches=False
+gamma=0.99
+reward_scale=1.0
+reward_clip=1000.0
+value_bootstrap=False
+normalize_returns=True
+exploration_loss_coeff=0.001
+value_loss_coeff=0.5
+kl_loss_coeff=0.0
+exploration_loss=symmetric_kl
+gae_lambda=0.95
+ppo_clip_ratio=0.1
+ppo_clip_value=0.2
+with_vtrace=False
+vtrace_rho=1.0
+vtrace_c=1.0
+optimizer=adam
+adam_eps=1e-06
+adam_beta1=0.9
+adam_beta2=0.999
+max_grad_norm=4.0
+learning_rate=0.0001
+lr_schedule=constant
+lr_schedule_kl_threshold=0.008
+lr_adaptive_min=1e-06
+lr_adaptive_max=0.01
+obs_subtract_mean=0.0
+obs_scale=255.0
+normalize_input=True
+normalize_input_keys=None
+decorrelate_experience_max_seconds=0
+decorrelate_envs_on_one_worker=True
+actor_worker_gpus=[]
+set_workers_cpu_affinity=True
+force_envs_single_thread=False
+default_niceness=0
+log_to_file=True
+experiment_summaries_interval=10
+flush_summaries_interval=30
+stats_avg=100
+summaries_use_frameskip=True
+heartbeat_interval=20
+heartbeat_reporting_interval=600
+train_for_env_steps=6000000
+train_for_seconds=10000000000
+save_every_sec=120
+keep_checkpoints=2
+load_checkpoint_kind=latest
+save_milestones_sec=-1
+save_best_every_sec=5
+save_best_metric=reward
+save_best_after=100000
+benchmark=False
+encoder_mlp_layers=[512, 512]
+encoder_conv_architecture=convnet_simple
+encoder_conv_mlp_layers=[512]
+use_rnn=True
+rnn_size=512
+rnn_type=gru
+rnn_num_layers=1
+decoder_mlp_layers=[]
+nonlinearity=elu
+policy_initialization=orthogonal
+policy_init_gain=1.0
+actor_critic_share_weights=True
+adaptive_stddev=True
+continuous_tanh_scale=0.0
+initial_stddev=1.0
+use_env_info_cache=False
+env_gpu_actions=False
+env_gpu_observations=True
+env_frameskip=4
+env_framestack=1
+pixel_format=CHW
+use_record_episode_statistics=False
+with_wandb=False
+wandb_user=None
+wandb_project=sample_factory
+wandb_group=None
+wandb_job_type=SF
+wandb_tags=[]
+with_pbt=False
+pbt_mix_policies_in_one_env=True
+pbt_period_env_steps=5000000
+pbt_start_mutation=20000000
+pbt_replace_fraction=0.3
+pbt_mutation_rate=0.15
+pbt_replace_reward_gap=0.1
+pbt_replace_reward_gap_absolute=1e-06
+pbt_optimize_gamma=False
+pbt_target_objective=true_objective
+pbt_perturb_min=1.1
+pbt_perturb_max=1.5
+num_agents=-1
+num_humans=0
+num_bots=-1
+start_bot_difficulty=None
+timelimit=None
+res_w=128
+res_h=72
+wide_aspect_ratio=False
+eval_env_frameskip=1
+fps=35
+command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000
+cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 4000000}
+git_hash=unknown
+git_repo_name=not a git repository
+[2023-05-12 15:18:09,876][00161] Saving configuration to /content/train_dir/default_experiment/config.json...
+[2023-05-12 15:18:09,883][00161] Rollout worker 0 uses device cpu
+[2023-05-12 15:18:09,884][00161] Rollout worker 1 uses device cpu
+[2023-05-12 15:18:09,888][00161] Rollout worker 2 uses device cpu
+[2023-05-12 15:18:09,890][00161] Rollout worker 3 uses device cpu
+[2023-05-12 15:18:09,892][00161] Rollout worker 4 uses device cpu
+[2023-05-12 15:18:09,893][00161] Rollout worker 5 uses device cpu
+[2023-05-12 15:18:09,894][00161] Rollout worker 6 uses device cpu
+[2023-05-12 15:18:09,896][00161] Rollout worker 7 uses device cpu
+[2023-05-12 15:18:10,002][00161] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-05-12 15:18:10,004][00161] InferenceWorker_p0-w0: min num requests: 2
+[2023-05-12 15:18:10,038][00161] Starting all processes...
+[2023-05-12 15:18:10,039][00161] Starting process learner_proc0
+[2023-05-12 15:18:10,088][00161] Starting all processes...
+[2023-05-12 15:18:10,094][00161] Starting process inference_proc0-0
+[2023-05-12 15:18:10,094][00161] Starting process rollout_proc0
+[2023-05-12 15:18:10,096][00161] Starting process rollout_proc1
+[2023-05-12 15:18:10,096][00161] Starting process rollout_proc2
+[2023-05-12 15:18:10,096][00161] Starting process rollout_proc3
+[2023-05-12 15:18:10,097][00161] Starting process rollout_proc4
+[2023-05-12 15:18:10,097][00161] Starting process rollout_proc5
+[2023-05-12 15:18:10,097][00161] Starting process rollout_proc6
+[2023-05-12 15:18:10,097][00161] Starting process rollout_proc7
+[2023-05-12 15:18:21,918][22711] Worker 0 uses CPU cores [0]
+[2023-05-12 15:18:21,929][22697] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-05-12 15:18:21,929][22697] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+[2023-05-12 15:18:21,974][22697] Num visible devices: 1
+[2023-05-12 15:18:22,007][22697] Starting seed is not provided
+[2023-05-12 15:18:22,007][22697] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-05-12 15:18:22,007][22697] Initializing actor-critic model on device cuda:0
+[2023-05-12 15:18:22,008][22697] RunningMeanStd input shape: (3, 72, 128)
+[2023-05-12 15:18:22,009][22697] RunningMeanStd input shape: (1,)
+[2023-05-12 15:18:22,079][22697] ConvEncoder: input_channels=3
+[2023-05-12 15:18:22,126][22715] Worker 2 uses CPU cores [0]
+[2023-05-12 15:18:22,170][22712] Worker 1 uses CPU cores [1]
+[2023-05-12 15:18:22,245][22713] Worker 4 uses CPU cores [0]
+[2023-05-12 15:18:22,271][22716] Worker 5 uses CPU cores [1]
+[2023-05-12 15:18:22,286][22717] Worker 7 uses CPU cores [1]
+[2023-05-12 15:18:22,343][22710] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-05-12 15:18:22,343][22710] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+[2023-05-12 15:18:22,373][22710] Num visible devices: 1
+[2023-05-12 15:18:22,402][22714] Worker 3 uses CPU cores [1]
+[2023-05-12 15:18:22,429][22718] Worker 6 uses CPU cores [0]
+[2023-05-12 15:18:22,454][22697] Conv encoder output size: 512
+[2023-05-12 15:18:22,454][22697] Policy head output size: 512
+[2023-05-12 15:18:22,469][22697] Created Actor Critic model with architecture:
+[2023-05-12 15:18:22,469][22697] ActorCriticSharedWeights(
+  (obs_normalizer): ObservationNormalizer(
+    (running_mean_std): RunningMeanStdDictInPlace(
+      (running_mean_std): ModuleDict(
+        (obs): RunningMeanStdInPlace()
+      )
+    )
+  )
+  (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+  (encoder): VizdoomEncoder(
+    (basic_encoder): ConvEncoder(
+      (enc): RecursiveScriptModule(
+        original_name=ConvEncoderImpl
+        (conv_head): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Conv2d)
+          (1): RecursiveScriptModule(original_name=ELU)
+          (2): RecursiveScriptModule(original_name=Conv2d)
+          (3): RecursiveScriptModule(original_name=ELU)
+          (4): RecursiveScriptModule(original_name=Conv2d)
+          (5): RecursiveScriptModule(original_name=ELU)
+        )
+        (mlp_layers): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Linear)
+          (1): RecursiveScriptModule(original_name=ELU)
+        )
+      )
+    )
+  )
+  (core): ModelCoreRNN(
+    (core): GRU(512, 512)
+  )
+  (decoder): MlpDecoder(
+    (mlp): Identity()
+  )
+  (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+  (action_parameterization): ActionParameterizationDefault(
+    (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+  )
+)
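A hedged PyTorch sketch of the architecture printed above: a shared-weights actor-critic with a three-layer conv encoder (ELU), a 512-unit MLP, a GRU core, and linear critic/action heads. The conv filter sizes are assumptions based on Sample Factory's "convnet_simple" preset; only the sizes printed in the log (512 hidden units, 5 actions, 3x72x128 input) are taken from the document.

```python
import torch
from torch import nn

class SharedActorCritic(nn.Module):
    def __init__(self, num_actions: int = 5, hidden: int = 512):
        super().__init__()
        # Filter sizes below are assumed, mirroring a typical "convnet_simple".
        self.conv_head = nn.Sequential(
            nn.Conv2d(3, 32, 8, stride=4), nn.ELU(),
            nn.Conv2d(32, 64, 4, stride=2), nn.ELU(),
            nn.Conv2d(64, 128, 3, stride=2), nn.ELU(),
        )
        with torch.no_grad():  # infer flatten size for the 3x72x128 observations
            n_flat = self.conv_head(torch.zeros(1, 3, 72, 128)).numel()
        self.mlp = nn.Sequential(nn.Linear(n_flat, hidden), nn.ELU())
        self.core = nn.GRU(hidden, hidden)          # GRU(512, 512) in the log
        self.critic_linear = nn.Linear(hidden, 1)
        self.action_head = nn.Linear(hidden, num_actions)

    def forward(self, obs, rnn_state):
        x = self.mlp(self.conv_head(obs).flatten(1))
        x, rnn_state = self.core(x.unsqueeze(0), rnn_state)  # seq length 1
        x = x.squeeze(0)
        return self.action_head(x), self.critic_linear(x), rnn_state

model = SharedActorCritic()
logits, value, h = model(torch.zeros(4, 3, 72, 128), torch.zeros(1, 4, 512))
```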
+[2023-05-12 15:18:23,933][22697] Using optimizer <class 'torch.optim.adam.Adam'>
+[2023-05-12 15:18:23,935][22697] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+[2023-05-12 15:18:23,970][22697] Loading model from checkpoint
+[2023-05-12 15:18:23,975][22697] Loaded experiment state at self.train_step=978, self.env_steps=4005888
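A minimal sketch of the resume step logged above: load the saved checkpoint and restore model/optimizer state plus the training counters. The checkpoint key names are assumptions, not necessarily Sample Factory's exact schema.

```python
import torch

def resume_from_checkpoint(path: str, model, optimizer, device: str = "cuda:0"):
    # Key names ("model", "optimizer", "train_step", "env_steps") are assumed.
    checkpoint = torch.load(path, map_location=device)
    model.load_state_dict(checkpoint["model"])
    optimizer.load_state_dict(checkpoint["optimizer"])
    return checkpoint["train_step"], checkpoint["env_steps"]  # e.g. 978, 4005888
```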
+[2023-05-12 15:18:23,975][22697] Initialized policy 0 weights for model version 978
+[2023-05-12 15:18:23,978][22697] LearnerWorker_p0 finished initialization!
+[2023-05-12 15:18:23,979][22697] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-05-12 15:18:24,240][22710] RunningMeanStd input shape: (3, 72, 128)
+[2023-05-12 15:18:24,242][22710] RunningMeanStd input shape: (1,)
+[2023-05-12 15:18:24,263][22710] ConvEncoder: input_channels=3
+[2023-05-12 15:18:24,385][22710] Conv encoder output size: 512
+[2023-05-12 15:18:24,385][22710] Policy head output size: 512
+[2023-05-12 15:18:25,682][00161] Inference worker 0-0 is ready!
+[2023-05-12 15:18:25,685][00161] All inference workers are ready! Signal rollout workers to start!
+[2023-05-12 15:18:25,820][22712] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-05-12 15:18:25,825][22717] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-05-12 15:18:25,837][22718] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-05-12 15:18:25,833][22716] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-05-12 15:18:25,842][22711] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-05-12 15:18:25,847][22714] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-05-12 15:18:25,841][22713] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-05-12 15:18:25,853][22715] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-05-12 15:18:26,700][22711] Decorrelating experience for 0 frames...
+[2023-05-12 15:18:26,706][22713] Decorrelating experience for 0 frames...
+[2023-05-12 15:18:26,709][22714] Decorrelating experience for 0 frames...
+[2023-05-12 15:18:26,714][22716] Decorrelating experience for 0 frames...
+[2023-05-12 15:18:27,752][22714] Decorrelating experience for 32 frames...
+[2023-05-12 15:18:27,763][22716] Decorrelating experience for 32 frames...
+[2023-05-12 15:18:27,775][22717] Decorrelating experience for 0 frames...
+[2023-05-12 15:18:28,101][22713] Decorrelating experience for 32 frames...
+[2023-05-12 15:18:28,109][22711] Decorrelating experience for 32 frames...
+[2023-05-12 15:18:28,170][22718] Decorrelating experience for 0 frames...
+[2023-05-12 15:18:28,201][22715] Decorrelating experience for 0 frames...
+[2023-05-12 15:18:28,456][00161] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 4005888. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-05-12 15:18:29,336][22717] Decorrelating experience for 32 frames...
+[2023-05-12 15:18:29,516][22711] Decorrelating experience for 64 frames...
+[2023-05-12 15:18:29,575][22714] Decorrelating experience for 64 frames...
+[2023-05-12 15:18:29,599][22716] Decorrelating experience for 64 frames...
+[2023-05-12 15:18:29,996][00161] Heartbeat connected on Batcher_0
+[2023-05-12 15:18:29,998][00161] Heartbeat connected on LearnerWorker_p0
+[2023-05-12 15:18:30,056][00161] Heartbeat connected on InferenceWorker_p0-w0
+[2023-05-12 15:18:30,079][22712] Decorrelating experience for 0 frames...
+[2023-05-12 15:18:31,051][22716] Decorrelating experience for 96 frames...
+[2023-05-12 15:18:31,244][00161] Heartbeat connected on RolloutWorker_w5
+[2023-05-12 15:18:31,465][22712] Decorrelating experience for 32 frames...
+[2023-05-12 15:18:32,008][22715] Decorrelating experience for 32 frames...
+[2023-05-12 15:18:32,301][22718] Decorrelating experience for 32 frames...
+[2023-05-12 15:18:32,326][22713] Decorrelating experience for 64 frames...
+[2023-05-12 15:18:32,647][22711] Decorrelating experience for 96 frames...
+[2023-05-12 15:18:32,965][00161] Heartbeat connected on RolloutWorker_w0
+[2023-05-12 15:18:33,456][00161] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4005888. Throughput: 0: 1.6. Samples: 8. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-05-12 15:18:33,458][00161] Avg episode reward: [(0, '0.320')]
+[2023-05-12 15:18:34,714][22712] Decorrelating experience for 64 frames...
+[2023-05-12 15:18:34,947][22713] Decorrelating experience for 96 frames...
+[2023-05-12 15:18:34,960][22717] Decorrelating experience for 64 frames...
+[2023-05-12 15:18:35,134][22715] Decorrelating experience for 64 frames...
+[2023-05-12 15:18:35,296][22718] Decorrelating experience for 64 frames...
+[2023-05-12 15:18:35,315][00161] Heartbeat connected on RolloutWorker_w4
+[2023-05-12 15:18:38,457][00161] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4005888. Throughput: 0: 162.8. Samples: 1628. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-05-12 15:18:38,463][00161] Avg episode reward: [(0, '3.560')]
+[2023-05-12 15:18:38,561][22714] Decorrelating experience for 96 frames...
+[2023-05-12 15:18:38,592][22712] Decorrelating experience for 96 frames...
+[2023-05-12 15:18:38,777][22717] Decorrelating experience for 96 frames...
+[2023-05-12 15:18:39,057][00161] Heartbeat connected on RolloutWorker_w3
+[2023-05-12 15:18:39,119][00161] Heartbeat connected on RolloutWorker_w1
+[2023-05-12 15:18:39,218][00161] Heartbeat connected on RolloutWorker_w7
+[2023-05-12 15:18:39,776][22718] Decorrelating experience for 96 frames...
+[2023-05-12 15:18:40,291][00161] Heartbeat connected on RolloutWorker_w6
+[2023-05-12 15:18:41,407][22715] Decorrelating experience for 96 frames...
+[2023-05-12 15:18:41,683][22697] Signal inference workers to stop experience collection...
+[2023-05-12 15:18:41,692][22710] InferenceWorker_p0-w0: stopping experience collection
+[2023-05-12 15:18:41,749][00161] Heartbeat connected on RolloutWorker_w2
+[2023-05-12 15:18:41,918][22697] Signal inference workers to resume experience collection...
+[2023-05-12 15:18:41,919][22710] InferenceWorker_p0-w0: resuming experience collection
+[2023-05-12 15:18:43,456][00161] Fps is (10 sec: 1638.4, 60 sec: 1092.3, 300 sec: 1092.3). Total num frames: 4022272. Throughput: 0: 194.3. Samples: 2914. Policy #0 lag: (min: 0.0, avg: 0.9, max: 1.0)
+[2023-05-12 15:18:43,458][00161] Avg episode reward: [(0, '7.028')]
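The recurring "Fps is (10 sec: ..., 60 sec: ..., 300 sec: ...)" lines report frame throughput over trailing windows. A hedged sketch of how such rates can be computed from timestamped frame counts; this illustrates the reporting only, not the actual Sample Factory implementation:

```python
import time
from collections import deque

class FpsMeter:
    def __init__(self, windows=(10, 60, 300)):
        self.windows = windows
        self.history = deque()  # (timestamp, total_frames) pairs

    def record(self, total_frames: int) -> dict:
        now = time.time()
        self.history.append((now, total_frames))
        # Drop samples older than the longest window.
        while now - self.history[0][0] > max(self.windows):
            self.history.popleft()
        rates = {}
        for w in self.windows:
            past = [(t, f) for t, f in self.history if now - t <= w]
            t0, f0 = past[0]  # oldest sample inside this window
            rates[w] = (total_frames - f0) / max(now - t0, 1e-9)
        return rates
```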
+[2023-05-12 15:18:48,169][22710] Updated weights for policy 0, policy_version 988 (0.0025)
+[2023-05-12 15:18:48,456][00161] Fps is (10 sec: 4096.6, 60 sec: 2048.0, 300 sec: 2048.0). Total num frames: 4046848. Throughput: 0: 447.8. Samples: 8956. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:18:48,458][00161] Avg episode reward: [(0, '14.569')]
+[2023-05-12 15:18:53,458][00161] Fps is (10 sec: 4095.1, 60 sec: 2293.6, 300 sec: 2293.6). Total num frames: 4063232. Throughput: 0: 579.6. Samples: 14492. Policy #0 lag: (min: 0.0, avg: 0.3, max: 2.0)
+[2023-05-12 15:18:53,461][00161] Avg episode reward: [(0, '18.060')]
+[2023-05-12 15:18:58,456][00161] Fps is (10 sec: 2867.2, 60 sec: 2321.1, 300 sec: 2321.1). Total num frames: 4075520. Throughput: 0: 557.1. Samples: 16712. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-05-12 15:18:58,462][00161] Avg episode reward: [(0, '20.069')]
+[2023-05-12 15:19:01,961][22710] Updated weights for policy 0, policy_version 998 (0.0018)
+[2023-05-12 15:19:03,456][00161] Fps is (10 sec: 2867.9, 60 sec: 2457.6, 300 sec: 2457.6). Total num frames: 4091904. Throughput: 0: 598.3. Samples: 20942. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-05-12 15:19:03,462][00161] Avg episode reward: [(0, '20.825')]
+[2023-05-12 15:19:08,456][00161] Fps is (10 sec: 4096.0, 60 sec: 2764.8, 300 sec: 2764.8). Total num frames: 4116480. Throughput: 0: 692.1. Samples: 27684. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:19:08,458][00161] Avg episode reward: [(0, '23.008')]
+[2023-05-12 15:19:10,995][22710] Updated weights for policy 0, policy_version 1008 (0.0019)
+[2023-05-12 15:19:13,456][00161] Fps is (10 sec: 4505.6, 60 sec: 2912.7, 300 sec: 2912.7). Total num frames: 4136960. Throughput: 0: 690.8. Samples: 31084. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:19:13,460][00161] Avg episode reward: [(0, '24.567')]
+[2023-05-12 15:19:18,457][00161] Fps is (10 sec: 3276.2, 60 sec: 2867.1, 300 sec: 2867.1). Total num frames: 4149248. Throughput: 0: 793.6. Samples: 35722. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:19:18,463][00161] Avg episode reward: [(0, '25.719')]
+[2023-05-12 15:19:23,456][00161] Fps is (10 sec: 2867.2, 60 sec: 2904.5, 300 sec: 2904.5). Total num frames: 4165632. Throughput: 0: 851.3. Samples: 39934. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-05-12 15:19:23,462][00161] Avg episode reward: [(0, '26.562')]
+[2023-05-12 15:19:24,549][22710] Updated weights for policy 0, policy_version 1018 (0.0035)
+[2023-05-12 15:19:28,456][00161] Fps is (10 sec: 3687.0, 60 sec: 3003.7, 300 sec: 3003.7). Total num frames: 4186112. Throughput: 0: 889.9. Samples: 42960. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-05-12 15:19:28,467][00161] Avg episode reward: [(0, '26.819')]
+[2023-05-12 15:19:33,456][00161] Fps is (10 sec: 4095.9, 60 sec: 3345.1, 300 sec: 3087.8). Total num frames: 4206592. Throughput: 0: 905.0. Samples: 49682. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:19:33,459][00161] Avg episode reward: [(0, '27.122')]
+[2023-05-12 15:19:33,540][22710] Updated weights for policy 0, policy_version 1028 (0.0027)
+[2023-05-12 15:19:38,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3618.2, 300 sec: 3101.3). Total num frames: 4222976. Throughput: 0: 888.3. Samples: 54464. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:19:38,459][00161] Avg episode reward: [(0, '26.159')]
+[2023-05-12 15:19:43,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3058.4). Total num frames: 4235264. Throughput: 0: 882.6. Samples: 56430. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-05-12 15:19:43,458][00161] Avg episode reward: [(0, '26.395')]
+[2023-05-12 15:19:47,682][22710] Updated weights for policy 0, policy_version 1038 (0.0020)
+[2023-05-12 15:19:48,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3413.3, 300 sec: 3072.0). Total num frames: 4251648. Throughput: 0: 889.6. Samples: 60972. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:19:48,460][00161] Avg episode reward: [(0, '25.787')]
+[2023-05-12 15:19:53,456][00161] Fps is (10 sec: 4096.0, 60 sec: 3550.0, 300 sec: 3180.4). Total num frames: 4276224. Throughput: 0: 885.2. Samples: 67520. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-05-12 15:19:53,464][00161] Avg episode reward: [(0, '24.078')]
+[2023-05-12 15:19:58,228][22710] Updated weights for policy 0, policy_version 1048 (0.0014)
+[2023-05-12 15:19:58,456][00161] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3185.8). Total num frames: 4292608. Throughput: 0: 875.4. Samples: 70478. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-05-12 15:19:58,461][00161] Avg episode reward: [(0, '23.732')]
+[2023-05-12 15:20:03,456][00161] Fps is (10 sec: 2867.0, 60 sec: 3549.8, 300 sec: 3147.4). Total num frames: 4304896. Throughput: 0: 860.8. Samples: 74456. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-05-12 15:20:03,464][00161] Avg episode reward: [(0, '23.913')]
+[2023-05-12 15:20:08,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3413.3, 300 sec: 3153.9). Total num frames: 4321280. Throughput: 0: 862.8. Samples: 78760. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-05-12 15:20:08,458][00161] Avg episode reward: [(0, '24.297')]
+[2023-05-12 15:20:08,469][22697] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001055_4321280.pth...
+[2023-05-12 15:20:08,613][22697] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000894_3661824.pth
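The paired "Saving .../Removing ..." lines implement the `keep_checkpoints=2` setting: each new checkpoint is written and the oldest surplus one is deleted. A hedged sketch of that pruning pattern; the glob and sort details are assumptions, not Sample Factory's code:

```python
from pathlib import Path

def prune_checkpoints(ckpt_dir: str, keep: int = 2) -> None:
    # Zero-padded names like checkpoint_000001055_4321280.pth sort by train step;
    # best_*.pth files do not match the pattern and are never pruned.
    ckpts = sorted(Path(ckpt_dir).glob("checkpoint_*.pth"))
    for old in ckpts[:-keep]:
        print(f"Removing {old}")
        old.unlink()
```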
+[2023-05-12 15:20:11,391][22710] Updated weights for policy 0, policy_version 1058 (0.0025)
+[2023-05-12 15:20:13,456][00161] Fps is (10 sec: 3686.6, 60 sec: 3413.3, 300 sec: 3198.8). Total num frames: 4341760. Throughput: 0: 865.0. Samples: 81886. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:20:13,458][00161] Avg episode reward: [(0, '25.512')]
+[2023-05-12 15:20:18,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3481.7, 300 sec: 3202.3). Total num frames: 4358144. Throughput: 0: 858.5. Samples: 88316. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:20:18,458][00161] Avg episode reward: [(0, '25.608')]
+[2023-05-12 15:20:23,189][22710] Updated weights for policy 0, policy_version 1068 (0.0013)
+[2023-05-12 15:20:23,463][00161] Fps is (10 sec: 3274.4, 60 sec: 3481.2, 300 sec: 3205.4). Total num frames: 4374528. Throughput: 0: 843.8. Samples: 92442. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:20:23,466][00161] Avg episode reward: [(0, '26.200')]
+[2023-05-12 15:20:28,456][00161] Fps is (10 sec: 2867.1, 60 sec: 3345.1, 300 sec: 3174.4). Total num frames: 4386816. Throughput: 0: 845.3. Samples: 94470. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-05-12 15:20:28,466][00161] Avg episode reward: [(0, '27.812')]
+[2023-05-12 15:20:33,456][00161] Fps is (10 sec: 3279.2, 60 sec: 3345.1, 300 sec: 3211.3). Total num frames: 4407296. Throughput: 0: 863.1. Samples: 99812. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-05-12 15:20:33,458][00161] Avg episode reward: [(0, '27.409')]
+[2023-05-12 15:20:34,605][22710] Updated weights for policy 0, policy_version 1078 (0.0028)
+[2023-05-12 15:20:38,456][00161] Fps is (10 sec: 4505.7, 60 sec: 3481.6, 300 sec: 3276.8). Total num frames: 4431872. Throughput: 0: 863.8. Samples: 106392. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-05-12 15:20:38,458][00161] Avg episode reward: [(0, '25.534')]
+[2023-05-12 15:20:43,458][00161] Fps is (10 sec: 3685.7, 60 sec: 3481.5, 300 sec: 3246.4). Total num frames: 4444160. Throughput: 0: 851.6. Samples: 108802. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2023-05-12 15:20:43,460][00161] Avg episode reward: [(0, '24.897')]
+[2023-05-12 15:20:47,215][22710] Updated weights for policy 0, policy_version 1088 (0.0020)
+[2023-05-12 15:20:48,456][00161] Fps is (10 sec: 2457.6, 60 sec: 3413.3, 300 sec: 3218.3). Total num frames: 4456448. Throughput: 0: 853.3. Samples: 112852. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:20:48,464][00161] Avg episode reward: [(0, '25.315')]
+[2023-05-12 15:20:53,456][00161] Fps is (10 sec: 3277.4, 60 sec: 3345.1, 300 sec: 3248.6). Total num frames: 4476928. Throughput: 0: 869.8. Samples: 117902. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-05-12 15:20:53,458][00161] Avg episode reward: [(0, '23.019')]
+[2023-05-12 15:20:58,305][22710] Updated weights for policy 0, policy_version 1098 (0.0021)
+[2023-05-12 15:20:58,456][00161] Fps is (10 sec: 4096.1, 60 sec: 3413.3, 300 sec: 3276.8). Total num frames: 4497408. Throughput: 0: 867.8. Samples: 120938. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:20:58,460][00161] Avg episode reward: [(0, '24.188')]
+[2023-05-12 15:21:03,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3276.8). Total num frames: 4513792. Throughput: 0: 850.1. Samples: 126572. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:21:03,459][00161] Avg episode reward: [(0, '22.206')]
+[2023-05-12 15:21:08,457][00161] Fps is (10 sec: 2867.2, 60 sec: 3413.3, 300 sec: 3251.2). Total num frames: 4526080. Throughput: 0: 841.6. Samples: 130310. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:21:08,465][00161] Avg episode reward: [(0, '21.844')]
+[2023-05-12 15:21:12,355][22710] Updated weights for policy 0, policy_version 1108 (0.0021)
+[2023-05-12 15:21:13,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3345.1, 300 sec: 3252.0). Total num frames: 4542464. Throughput: 0: 841.1. Samples: 132320. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-05-12 15:21:13,461][00161] Avg episode reward: [(0, '22.108')]
+[2023-05-12 15:21:18,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3276.8). Total num frames: 4562944. Throughput: 0: 851.7. Samples: 138138. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-05-12 15:21:18,463][00161] Avg episode reward: [(0, '21.479')]
+[2023-05-12 15:21:22,098][22710] Updated weights for policy 0, policy_version 1118 (0.0018)
+[2023-05-12 15:21:23,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3413.8, 300 sec: 3276.8). Total num frames: 4579328. Throughput: 0: 838.6. Samples: 144130. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-05-12 15:21:23,464][00161] Avg episode reward: [(0, '20.203')]
+[2023-05-12 15:21:28,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3413.3, 300 sec: 3254.1). Total num frames: 4591616. Throughput: 0: 825.5. Samples: 145950. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-05-12 15:21:28,460][00161] Avg episode reward: [(0, '19.531')]
+[2023-05-12 15:21:33,456][00161] Fps is (10 sec: 2457.5, 60 sec: 3276.8, 300 sec: 3232.5). Total num frames: 4603904. Throughput: 0: 821.5. Samples: 149820. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-05-12 15:21:33,462][00161] Avg episode reward: [(0, '20.873')]
+[2023-05-12 15:21:36,618][22710] Updated weights for policy 0, policy_version 1128 (0.0020)
+[2023-05-12 15:21:38,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3276.8). Total num frames: 4628480. Throughput: 0: 832.8. Samples: 155376. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:21:38,464][00161] Avg episode reward: [(0, '22.417')]
+[2023-05-12 15:21:43,456][00161] Fps is (10 sec: 4505.8, 60 sec: 3413.4, 300 sec: 3297.8). Total num frames: 4648960. Throughput: 0: 837.0. Samples: 158604. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-05-12 15:21:43,463][00161] Avg episode reward: [(0, '21.530')]
+[2023-05-12 15:21:47,273][22710] Updated weights for policy 0, policy_version 1138 (0.0014)
+[2023-05-12 15:21:48,456][00161] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 3276.8). Total num frames: 4661248. Throughput: 0: 832.0. Samples: 164014. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:21:48,458][00161] Avg episode reward: [(0, '22.429')]
+[2023-05-12 15:21:53,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3345.1, 300 sec: 3276.8). Total num frames: 4677632. Throughput: 0: 839.5. Samples: 168088. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:21:53,462][00161] Avg episode reward: [(0, '23.061')]
+[2023-05-12 15:21:58,456][00161] Fps is (10 sec: 3276.7, 60 sec: 3276.8, 300 sec: 3276.8). Total num frames: 4694016. Throughput: 0: 838.2. Samples: 170038. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:21:58,459][00161] Avg episode reward: [(0, '23.811')]
+[2023-05-12 15:21:59,930][22710] Updated weights for policy 0, policy_version 1148 (0.0014)
+[2023-05-12 15:22:03,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3345.1, 300 sec: 3295.9). Total num frames: 4714496. Throughput: 0: 852.9. Samples: 176520. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:22:03,458][00161] Avg episode reward: [(0, '24.500')]
+[2023-05-12 15:22:08,456][00161] Fps is (10 sec: 3686.5, 60 sec: 3413.3, 300 sec: 3295.4). Total num frames: 4730880. Throughput: 0: 843.3. Samples: 182080. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:22:08,463][00161] Avg episode reward: [(0, '25.305')]
+[2023-05-12 15:22:08,473][22697] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001155_4730880.pth...
+[2023-05-12 15:22:08,666][22697] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth
+[2023-05-12 15:22:11,755][22710] Updated weights for policy 0, policy_version 1158 (0.0030)
+[2023-05-12 15:22:13,456][00161] Fps is (10 sec: 3276.7, 60 sec: 3413.3, 300 sec: 3295.0). Total num frames: 4747264. Throughput: 0: 848.2. Samples: 184120. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:22:13,460][00161] Avg episode reward: [(0, '25.632')]
+[2023-05-12 15:22:18,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3276.8). Total num frames: 4759552. Throughput: 0: 853.1. Samples: 188210. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-05-12 15:22:18,463][00161] Avg episode reward: [(0, '26.031')]
+[2023-05-12 15:22:23,406][22710] Updated weights for policy 0, policy_version 1168 (0.0013)
+[2023-05-12 15:22:23,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3311.7). Total num frames: 4784128. Throughput: 0: 864.7. Samples: 194286. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:22:23,458][00161] Avg episode reward: [(0, '26.980')]
+[2023-05-12 15:22:28,456][00161] Fps is (10 sec: 4096.0, 60 sec: 3481.6, 300 sec: 3310.9). Total num frames: 4800512. Throughput: 0: 863.9. Samples: 197478. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:22:28,463][00161] Avg episode reward: [(0, '26.900')]
+[2023-05-12 15:22:33,456][00161] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3310.2). Total num frames: 4816896. Throughput: 0: 851.9. Samples: 202350. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:22:33,459][00161] Avg episode reward: [(0, '26.598')]
+[2023-05-12 15:22:36,292][22710] Updated weights for policy 0, policy_version 1178 (0.0015)
+[2023-05-12 15:22:38,456][00161] Fps is (10 sec: 2867.1, 60 sec: 3345.0, 300 sec: 3293.2). Total num frames: 4829184. Throughput: 0: 849.7. Samples: 206324. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:22:38,458][00161] Avg episode reward: [(0, '25.935')]
+[2023-05-12 15:22:43,456][00161] Fps is (10 sec: 3276.8, 60 sec: 3345.1, 300 sec: 3308.9). Total num frames: 4849664. Throughput: 0: 858.1. Samples: 208654. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:22:43,462][00161] Avg episode reward: [(0, '26.116')]
+[2023-05-12 15:22:47,036][22710] Updated weights for policy 0, policy_version 1188 (0.0029)
+[2023-05-12 15:22:48,456][00161] Fps is (10 sec: 4096.1, 60 sec: 3481.6, 300 sec: 3324.1). Total num frames: 4870144. Throughput: 0: 859.9. Samples: 215214. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-05-12 15:22:48,457][00161] Avg episode reward: [(0, '25.939')]
+[2023-05-12 15:22:53,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3323.2). Total num frames: 4886528. Throughput: 0: 848.3. Samples: 220254. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-05-12 15:22:53,461][00161] Avg episode reward: [(0, '26.147')]
+[2023-05-12 15:22:58,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3413.3, 300 sec: 3307.1). Total num frames: 4898816. Throughput: 0: 845.4. Samples: 222164. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:22:58,463][00161] Avg episode reward: [(0, '25.328')]
+[2023-05-12 15:23:01,040][22710] Updated weights for policy 0, policy_version 1198 (0.0031)
+[2023-05-12 15:23:03,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3345.1, 300 sec: 3306.6). Total num frames: 4915200. Throughput: 0: 846.3. Samples: 226292. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:23:03,463][00161] Avg episode reward: [(0, '26.424')]
+[2023-05-12 15:23:08,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3320.7). Total num frames: 4935680. Throughput: 0: 854.5. Samples: 232740. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:23:08,458][00161] Avg episode reward: [(0, '25.205')]
+[2023-05-12 15:23:10,868][22710] Updated weights for policy 0, policy_version 1208 (0.0016)
+[2023-05-12 15:23:13,456][00161] Fps is (10 sec: 3686.3, 60 sec: 3413.3, 300 sec: 3319.9). Total num frames: 4952064. Throughput: 0: 854.0. Samples: 235906. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:23:13,458][00161] Avg episode reward: [(0, '23.531')]
+[2023-05-12 15:23:18,457][00161] Fps is (10 sec: 3276.4, 60 sec: 3481.5, 300 sec: 3319.2). Total num frames: 4968448. Throughput: 0: 842.8. Samples: 240278. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:23:18,459][00161] Avg episode reward: [(0, '23.912')]
+[2023-05-12 15:23:23,456][00161] Fps is (10 sec: 2867.3, 60 sec: 3276.8, 300 sec: 3304.6). Total num frames: 4980736. Throughput: 0: 844.7. Samples: 244336. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:23:23,461][00161] Avg episode reward: [(0, '23.550')]
+[2023-05-12 15:23:24,864][22710] Updated weights for policy 0, policy_version 1218 (0.0017)
+[2023-05-12 15:23:28,456][00161] Fps is (10 sec: 3277.2, 60 sec: 3345.1, 300 sec: 3374.0). Total num frames: 5001216. Throughput: 0: 856.5. Samples: 247198. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-05-12 15:23:28,462][00161] Avg episode reward: [(0, '23.812')]
+[2023-05-12 15:23:33,456][00161] Fps is (10 sec: 4095.9, 60 sec: 3413.3, 300 sec: 3443.4). Total num frames: 5021696. Throughput: 0: 854.6. Samples: 253672. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-05-12 15:23:33,459][00161] Avg episode reward: [(0, '22.827')]
+[2023-05-12 15:23:35,160][22710] Updated weights for policy 0, policy_version 1228 (0.0015)
+[2023-05-12 15:23:38,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3443.4). Total num frames: 5038080. Throughput: 0: 846.0. Samples: 258322. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:23:38,465][00161] Avg episode reward: [(0, '23.443')]
+[2023-05-12 15:23:43,456][00161] Fps is (10 sec: 2867.3, 60 sec: 3345.1, 300 sec: 3401.8). Total num frames: 5050368. Throughput: 0: 846.8. Samples: 260270. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-05-12 15:23:43,460][00161] Avg episode reward: [(0, '24.282')]
+[2023-05-12 15:23:48,329][22710] Updated weights for policy 0, policy_version 1238 (0.0034)
+[2023-05-12 15:23:48,456][00161] Fps is (10 sec: 3276.8, 60 sec: 3345.1, 300 sec: 3415.7). Total num frames: 5070848. Throughput: 0: 860.8. Samples: 265030. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:23:48,458][00161] Avg episode reward: [(0, '25.311')]
+[2023-05-12 15:23:53,456][00161] Fps is (10 sec: 4096.0, 60 sec: 3413.3, 300 sec: 3443.4). Total num frames: 5091328. Throughput: 0: 861.0. Samples: 271484. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-05-12 15:23:53,459][00161] Avg episode reward: [(0, '26.232')]
+[2023-05-12 15:23:58,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3443.4). Total num frames: 5107712. Throughput: 0: 857.4. Samples: 274490. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-05-12 15:23:58,463][00161] Avg episode reward: [(0, '25.260')]
+[2023-05-12 15:23:59,745][22710] Updated weights for policy 0, policy_version 1248 (0.0018)
+[2023-05-12 15:24:03,457][00161] Fps is (10 sec: 2867.0, 60 sec: 3413.3, 300 sec: 3401.8). Total num frames: 5120000. Throughput: 0: 847.9. Samples: 278434. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-05-12 15:24:03,463][00161] Avg episode reward: [(0, '25.438')]
+[2023-05-12 15:24:08,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3345.1, 300 sec: 3387.9). Total num frames: 5136384. Throughput: 0: 850.8. Samples: 282622. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-05-12 15:24:08,459][00161] Avg episode reward: [(0, '27.248')]
+[2023-05-12 15:24:08,469][22697] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001254_5136384.pth...
+[2023-05-12 15:24:08,618][22697] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001055_4321280.pth
+[2023-05-12 15:24:12,198][22710] Updated weights for policy 0, policy_version 1258 (0.0029)
+[2023-05-12 15:24:13,456][00161] Fps is (10 sec: 3686.7, 60 sec: 3413.3, 300 sec: 3415.7). Total num frames: 5156864. Throughput: 0: 856.4. Samples: 285738. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-05-12 15:24:13,458][00161] Avg episode reward: [(0, '27.366')]
+[2023-05-12 15:24:18,456][00161] Fps is (10 sec: 4095.8, 60 sec: 3481.6, 300 sec: 3429.5). Total num frames: 5177344. Throughput: 0: 852.6. Samples: 292040. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:24:18,461][00161] Avg episode reward: [(0, '27.684')]
+[2023-05-12 15:24:23,463][00161] Fps is (10 sec: 3274.4, 60 sec: 3481.2, 300 sec: 3401.7). Total num frames: 5189632. Throughput: 0: 843.0. Samples: 296264. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-05-12 15:24:23,466][00161] Avg episode reward: [(0, '27.194')]
+[2023-05-12 15:24:24,748][22710] Updated weights for policy 0, policy_version 1268 (0.0016)
+[2023-05-12 15:24:28,457][00161] Fps is (10 sec: 2457.4, 60 sec: 3345.0, 300 sec: 3374.0). Total num frames: 5201920. Throughput: 0: 843.2. Samples: 298216. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:24:28,460][00161] Avg episode reward: [(0, '26.857')]
+[2023-05-12 15:24:33,461][00161] Fps is (10 sec: 3277.4, 60 sec: 3344.8, 300 sec: 3387.8). Total num frames: 5222400. Throughput: 0: 846.5. Samples: 303126. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-05-12 15:24:33,464][00161] Avg episode reward: [(0, '26.960')]
+[2023-05-12 15:24:36,023][22710] Updated weights for policy 0, policy_version 1278 (0.0021)
+[2023-05-12 15:24:38,456][00161] Fps is (10 sec: 4096.5, 60 sec: 3413.3, 300 sec: 3415.6). Total num frames: 5242880. Throughput: 0: 846.8. Samples: 309592. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-05-12 15:24:38,459][00161] Avg episode reward: [(0, '26.592')]
+[2023-05-12 15:24:43,456][00161] Fps is (10 sec: 3688.5, 60 sec: 3481.6, 300 sec: 3415.6). Total num frames: 5259264. Throughput: 0: 839.5. Samples: 312268. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:24:43,462][00161] Avg episode reward: [(0, '26.572')]
+[2023-05-12 15:24:48,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3345.1, 300 sec: 3374.0). Total num frames: 5271552. Throughput: 0: 842.0. Samples: 316324. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-05-12 15:24:48,463][00161] Avg episode reward: [(0, '26.433')]
+[2023-05-12 15:24:49,215][22710] Updated weights for policy 0, policy_version 1288 (0.0039)
+[2023-05-12 15:24:53,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3374.0). Total num frames: 5287936. Throughput: 0: 853.2. Samples: 321018. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:24:53,463][00161] Avg episode reward: [(0, '26.639')]
+[2023-05-12 15:24:58,456][00161] Fps is (10 sec: 4096.1, 60 sec: 3413.3, 300 sec: 3415.7). Total num frames: 5312512. Throughput: 0: 856.3. Samples: 324272. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:24:58,459][00161] Avg episode reward: [(0, '27.579')]
+[2023-05-12 15:24:59,457][22710] Updated weights for policy 0, policy_version 1298 (0.0015)
+[2023-05-12 15:25:03,458][00161] Fps is (10 sec: 4095.0, 60 sec: 3481.5, 300 sec: 3415.6). Total num frames: 5328896. Throughput: 0: 856.6. Samples: 330590. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-05-12 15:25:03,460][00161] Avg episode reward: [(0, '27.778')]
+[2023-05-12 15:25:08,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3413.3, 300 sec: 3387.9). Total num frames: 5341184. Throughput: 0: 851.4. Samples: 334572. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-05-12 15:25:08,463][00161] Avg episode reward: [(0, '29.597')]
+[2023-05-12 15:25:08,478][22697] Saving new best policy, reward=29.597!
+[2023-05-12 15:25:13,456][00161] Fps is (10 sec: 2458.2, 60 sec: 3276.8, 300 sec: 3374.0). Total num frames: 5353472. Throughput: 0: 852.2. Samples: 336562. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-05-12 15:25:13,461][00161] Avg episode reward: [(0, '29.826')]
+[2023-05-12 15:25:13,530][22710] Updated weights for policy 0, policy_version 1308 (0.0015)
+[2023-05-12 15:25:13,534][22697] Saving new best policy, reward=29.826!
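The "Saving new best policy, reward=29.826!" lines match the `save_best_metric=reward` / `save_best_after=100000` settings in the config dump above: once enough steps have elapsed, the policy is snapshotted whenever the tracked reward improves (this is where the `best_000001308_..._reward_29.826.pth` file in this commit comes from). A hedged sketch of that logic; the class and save_fn hook are illustrative names:

```python
class BestPolicyTracker:
    def __init__(self, save_best_after: int = 100000):
        self.best_reward = float("-inf")
        self.save_best_after = save_best_after

    def maybe_save(self, avg_reward: float, env_steps: int, save_fn) -> None:
        # Snapshot only after the warm-up period and only on improvement.
        if env_steps >= self.save_best_after and avg_reward > self.best_reward:
            self.best_reward = avg_reward
            print(f"Saving new best policy, reward={avg_reward:.3f}!")
            save_fn()
```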
1826 |
+
[2023-05-12 15:25:18,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3345.1, 300 sec: 3401.8). Total num frames: 5378048. Throughput: 0: 860.4. Samples: 341838. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
1827 |
+
[2023-05-12 15:25:18,458][00161] Avg episode reward: [(0, '28.669')]
|
1828 |
+
[2023-05-12 15:25:23,194][22710] Updated weights for policy 0, policy_version 1318 (0.0018)
|
1829 |
+
[2023-05-12 15:25:23,456][00161] Fps is (10 sec: 4505.6, 60 sec: 3482.0, 300 sec: 3429.5). Total num frames: 5398528. Throughput: 0: 860.0. Samples: 348294. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
1830 |
+
[2023-05-12 15:25:23,458][00161] Avg episode reward: [(0, '27.754')]
|
1831 |
+
[2023-05-12 15:25:28,456][00161] Fps is (10 sec: 3276.7, 60 sec: 3481.7, 300 sec: 3401.8). Total num frames: 5410816. Throughput: 0: 850.4. Samples: 350536. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
|
1832 |
+
[2023-05-12 15:25:28,461][00161] Avg episode reward: [(0, '27.521')]
|
1833 |
+
[2023-05-12 15:25:33,456][00161] Fps is (10 sec: 2457.5, 60 sec: 3345.3, 300 sec: 3360.1). Total num frames: 5423104. Throughput: 0: 848.7. Samples: 354516. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
1834 |
+
[2023-05-12 15:25:33,465][00161] Avg episode reward: [(0, '27.238')]
|
1835 |
+
[2023-05-12 15:25:37,344][22710] Updated weights for policy 0, policy_version 1328 (0.0020)
|
1836 |
+
[2023-05-12 15:25:38,456][00161] Fps is (10 sec: 3276.9, 60 sec: 3345.1, 300 sec: 3387.9). Total num frames: 5443584. Throughput: 0: 853.9. Samples: 359444. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
1837 |
+
[2023-05-12 15:25:38,458][00161] Avg episode reward: [(0, '25.111')]
|
1838 |
+
[2023-05-12 15:25:43,456][00161] Fps is (10 sec: 4096.2, 60 sec: 3413.3, 300 sec: 3415.7). Total num frames: 5464064. Throughput: 0: 854.9. Samples: 362744. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
1839 |
+
[2023-05-12 15:25:43,461][00161] Avg episode reward: [(0, '23.414')]
|
1840 |
+
[2023-05-12 15:25:47,469][22710] Updated weights for policy 0, policy_version 1338 (0.0031)
|
1841 |
+
[2023-05-12 15:25:48,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3401.8). Total num frames: 5480448. Throughput: 0: 846.4. Samples: 368676. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-05-12 15:25:48,461][00161] Avg episode reward: [(0, '24.372')]
[2023-05-12 15:25:53,456][00161] Fps is (10 sec: 2867.1, 60 sec: 3413.3, 300 sec: 3374.0). Total num frames: 5492736. Throughput: 0: 847.5. Samples: 372708. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
[2023-05-12 15:25:53,465][00161] Avg episode reward: [(0, '25.409')]
[2023-05-12 15:25:58,458][00161] Fps is (10 sec: 2866.6, 60 sec: 3276.7, 300 sec: 3374.0). Total num frames: 5509120. Throughput: 0: 846.8. Samples: 374668. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-05-12 15:25:58,466][00161] Avg episode reward: [(0, '25.042')]
[2023-05-12 15:26:00,711][22710] Updated weights for policy 0, policy_version 1348 (0.0029)
[2023-05-12 15:26:03,456][00161] Fps is (10 sec: 3686.5, 60 sec: 3345.2, 300 sec: 3401.8). Total num frames: 5529600. Throughput: 0: 860.5. Samples: 380560. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-05-12 15:26:03,466][00161] Avg episode reward: [(0, '23.433')]
[2023-05-12 15:26:08,457][00161] Fps is (10 sec: 4096.2, 60 sec: 3481.5, 300 sec: 3415.6). Total num frames: 5550080. Throughput: 0: 856.2. Samples: 386824. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-05-12 15:26:08,460][00161] Avg episode reward: [(0, '22.979')]
[2023-05-12 15:26:08,469][22697] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001355_5550080.pth...
[2023-05-12 15:26:08,664][22697] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001155_4730880.pth
[2023-05-12 15:26:12,216][22710] Updated weights for policy 0, policy_version 1358 (0.0040)
[2023-05-12 15:26:13,461][00161] Fps is (10 sec: 3275.0, 60 sec: 3481.3, 300 sec: 3387.8). Total num frames: 5562368. Throughput: 0: 848.3. Samples: 388712. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-05-12 15:26:13,464][00161] Avg episode reward: [(0, '23.679')]
[2023-05-12 15:26:18,456][00161] Fps is (10 sec: 2867.7, 60 sec: 3345.1, 300 sec: 3387.9). Total num frames: 5578752. Throughput: 0: 849.9. Samples: 392762. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
[2023-05-12 15:26:18,463][00161] Avg episode reward: [(0, '23.329')]
[2023-05-12 15:26:23,456][00161] Fps is (10 sec: 3688.3, 60 sec: 3345.1, 300 sec: 3415.6). Total num frames: 5599232. Throughput: 0: 860.6. Samples: 398172. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
[2023-05-12 15:26:23,464][00161] Avg episode reward: [(0, '23.450')]
[2023-05-12 15:26:24,391][22710] Updated weights for policy 0, policy_version 1368 (0.0013)
[2023-05-12 15:26:28,456][00161] Fps is (10 sec: 4096.0, 60 sec: 3481.6, 300 sec: 3443.4). Total num frames: 5619712. Throughput: 0: 857.0. Samples: 401310. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
[2023-05-12 15:26:28,463][00161] Avg episode reward: [(0, '23.739')]
[2023-05-12 15:26:33,456][00161] Fps is (10 sec: 3276.9, 60 sec: 3481.6, 300 sec: 3401.8). Total num frames: 5632000. Throughput: 0: 845.5. Samples: 406724. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
[2023-05-12 15:26:33,458][00161] Avg episode reward: [(0, '23.989')]
[2023-05-12 15:26:37,083][22710] Updated weights for policy 0, policy_version 1378 (0.0016)
[2023-05-12 15:26:38,456][00161] Fps is (10 sec: 2457.6, 60 sec: 3345.1, 300 sec: 3374.0). Total num frames: 5644288. Throughput: 0: 842.5. Samples: 410622. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
[2023-05-12 15:26:38,465][00161] Avg episode reward: [(0, '23.383')]
[2023-05-12 15:26:43,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3387.9). Total num frames: 5660672. Throughput: 0: 842.7. Samples: 412586. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
[2023-05-12 15:26:43,464][00161] Avg episode reward: [(0, '24.549')]
[2023-05-12 15:26:48,260][22710] Updated weights for policy 0, policy_version 1388 (0.0027)
[2023-05-12 15:26:48,456][00161] Fps is (10 sec: 4096.0, 60 sec: 3413.3, 300 sec: 3415.6). Total num frames: 5685248. Throughput: 0: 848.9. Samples: 418760. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
[2023-05-12 15:26:48,463][00161] Avg episode reward: [(0, '25.485')]
[2023-05-12 15:26:53,456][00161] Fps is (10 sec: 4096.1, 60 sec: 3481.6, 300 sec: 3415.7). Total num frames: 5701632. Throughput: 0: 840.5. Samples: 424646. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
[2023-05-12 15:26:53,464][00161] Avg episode reward: [(0, '22.950')]
[2023-05-12 15:26:58,456][00161] Fps is (10 sec: 3276.8, 60 sec: 3481.7, 300 sec: 3401.8). Total num frames: 5718016. Throughput: 0: 842.9. Samples: 426638. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
[2023-05-12 15:26:58,459][00161] Avg episode reward: [(0, '22.815')]
[2023-05-12 15:27:01,711][22710] Updated weights for policy 0, policy_version 1398 (0.0017)
[2023-05-12 15:27:03,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3345.1, 300 sec: 3387.9). Total num frames: 5730304. Throughput: 0: 841.8. Samples: 430642. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
[2023-05-12 15:27:03,461][00161] Avg episode reward: [(0, '22.857')]
[2023-05-12 15:27:08,456][00161] Fps is (10 sec: 3276.8, 60 sec: 3345.2, 300 sec: 3401.8). Total num frames: 5750784. Throughput: 0: 849.1. Samples: 436382. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
[2023-05-12 15:27:08,458][00161] Avg episode reward: [(0, '22.859')]
[2023-05-12 15:27:11,935][22710] Updated weights for policy 0, policy_version 1408 (0.0017)
[2023-05-12 15:27:13,456][00161] Fps is (10 sec: 4096.0, 60 sec: 3481.9, 300 sec: 3429.5). Total num frames: 5771264. Throughput: 0: 850.9. Samples: 439602. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-05-12 15:27:13,458][00161] Avg episode reward: [(0, '22.883')]
[2023-05-12 15:27:18,456][00161] Fps is (10 sec: 3686.3, 60 sec: 3481.6, 300 sec: 3401.8). Total num frames: 5787648. Throughput: 0: 845.5. Samples: 444774. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
[2023-05-12 15:27:18,463][00161] Avg episode reward: [(0, '23.158')]
[2023-05-12 15:27:23,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3345.1, 300 sec: 3387.9). Total num frames: 5799936. Throughput: 0: 848.1. Samples: 448786. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
[2023-05-12 15:27:23,464][00161] Avg episode reward: [(0, '22.899')]
[2023-05-12 15:27:25,890][22710] Updated weights for policy 0, policy_version 1418 (0.0030)
[2023-05-12 15:27:28,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3387.9). Total num frames: 5816320. Throughput: 0: 850.3. Samples: 450848. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
[2023-05-12 15:27:28,459][00161] Avg episode reward: [(0, '24.810')]
[2023-05-12 15:27:33,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3415.7). Total num frames: 5836800. Throughput: 0: 850.7. Samples: 457042. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
[2023-05-12 15:27:33,459][00161] Avg episode reward: [(0, '25.770')]
[2023-05-12 15:27:35,909][22710] Updated weights for policy 0, policy_version 1428 (0.0012)
[2023-05-12 15:27:38,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3401.8). Total num frames: 5853184. Throughput: 0: 842.4. Samples: 462556. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
[2023-05-12 15:27:38,458][00161] Avg episode reward: [(0, '27.069')]
[2023-05-12 15:27:43,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3413.3, 300 sec: 3374.0). Total num frames: 5865472. Throughput: 0: 842.4. Samples: 464546. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
[2023-05-12 15:27:43,459][00161] Avg episode reward: [(0, '25.146')]
[2023-05-12 15:27:48,456][00161] Fps is (10 sec: 2867.3, 60 sec: 3276.8, 300 sec: 3374.0). Total num frames: 5881856. Throughput: 0: 843.3. Samples: 468590. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
[2023-05-12 15:27:48,461][00161] Avg episode reward: [(0, '24.545')]
[2023-05-12 15:27:49,689][22710] Updated weights for policy 0, policy_version 1438 (0.0028)
[2023-05-12 15:27:53,456][00161] Fps is (10 sec: 3686.4, 60 sec: 3345.1, 300 sec: 3401.8). Total num frames: 5902336. Throughput: 0: 854.7. Samples: 474844. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-05-12 15:27:53,462][00161] Avg episode reward: [(0, '25.359')]
[2023-05-12 15:27:58,456][00161] Fps is (10 sec: 4096.0, 60 sec: 3413.3, 300 sec: 3415.6). Total num frames: 5922816. Throughput: 0: 854.0. Samples: 478032. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
[2023-05-12 15:27:58,461][00161] Avg episode reward: [(0, '25.412')]
[2023-05-12 15:28:00,577][22710] Updated weights for policy 0, policy_version 1448 (0.0013)
[2023-05-12 15:28:03,459][00161] Fps is (10 sec: 3275.9, 60 sec: 3413.2, 300 sec: 3387.8). Total num frames: 5935104. Throughput: 0: 840.3. Samples: 482588. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-05-12 15:28:03,461][00161] Avg episode reward: [(0, '25.439')]
[2023-05-12 15:28:08,456][00161] Fps is (10 sec: 2867.2, 60 sec: 3345.1, 300 sec: 3387.9). Total num frames: 5951488. Throughput: 0: 841.8. Samples: 486668. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
[2023-05-12 15:28:08,458][00161] Avg episode reward: [(0, '25.013')]
[2023-05-12 15:28:08,474][22697] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001453_5951488.pth...
[2023-05-12 15:28:08,665][22697] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001254_5136384.pth
[2023-05-12 15:28:13,348][22710] Updated weights for policy 0, policy_version 1458 (0.0014)
[2023-05-12 15:28:13,456][00161] Fps is (10 sec: 3687.4, 60 sec: 3345.1, 300 sec: 3401.8). Total num frames: 5971968. Throughput: 0: 852.5. Samples: 489212. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-05-12 15:28:13,458][00161] Avg episode reward: [(0, '25.185')]
[2023-05-12 15:28:18,461][00161] Fps is (10 sec: 4093.8, 60 sec: 3413.0, 300 sec: 3429.5). Total num frames: 5992448. Throughput: 0: 858.6. Samples: 495684. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-05-12 15:28:18,464][00161] Avg episode reward: [(0, '26.132')]
[2023-05-12 15:28:21,761][22697] Stopping Batcher_0...
[2023-05-12 15:28:21,761][22697] Loop batcher_evt_loop terminating...
[2023-05-12 15:28:21,762][00161] Component Batcher_0 stopped!
[2023-05-12 15:28:21,765][22697] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001466_6004736.pth...
[2023-05-12 15:28:21,877][22710] Weights refcount: 2 0
[2023-05-12 15:28:21,884][00161] Component InferenceWorker_p0-w0 stopped!
[2023-05-12 15:28:21,890][00161] Component RolloutWorker_w7 stopped!
[2023-05-12 15:28:21,893][00161] Component RolloutWorker_w5 stopped!
[2023-05-12 15:28:21,886][22710] Stopping InferenceWorker_p0-w0...
[2023-05-12 15:28:21,896][22710] Loop inference_proc0-0_evt_loop terminating...
[2023-05-12 15:28:21,892][22717] Stopping RolloutWorker_w7...
[2023-05-12 15:28:21,899][22717] Loop rollout_proc7_evt_loop terminating...
[2023-05-12 15:28:21,914][00161] Component RolloutWorker_w1 stopped!
[2023-05-12 15:28:21,916][22712] Stopping RolloutWorker_w1...
[2023-05-12 15:28:21,917][22712] Loop rollout_proc1_evt_loop terminating...
[2023-05-12 15:28:21,895][22716] Stopping RolloutWorker_w5...
[2023-05-12 15:28:21,920][22716] Loop rollout_proc5_evt_loop terminating...
[2023-05-12 15:28:21,940][00161] Component RolloutWorker_w3 stopped!
[2023-05-12 15:28:21,942][22714] Stopping RolloutWorker_w3...
[2023-05-12 15:28:21,949][22714] Loop rollout_proc3_evt_loop terminating...
[2023-05-12 15:28:21,976][22711] Stopping RolloutWorker_w0...
[2023-05-12 15:28:21,994][22711] Loop rollout_proc0_evt_loop terminating...
[2023-05-12 15:28:21,993][00161] Component RolloutWorker_w0 stopped!
[2023-05-12 15:28:22,015][00161] Component RolloutWorker_w2 stopped!
[2023-05-12 15:28:22,015][22715] Stopping RolloutWorker_w2...
[2023-05-12 15:28:22,029][00161] Component RolloutWorker_w6 stopped!
[2023-05-12 15:28:22,031][22718] Stopping RolloutWorker_w6...
[2023-05-12 15:28:22,031][22718] Loop rollout_proc6_evt_loop terminating...
[2023-05-12 15:28:22,022][22715] Loop rollout_proc2_evt_loop terminating...
[2023-05-12 15:28:22,042][22697] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001355_5550080.pth
[2023-05-12 15:28:22,072][22713] Stopping RolloutWorker_w4...
[2023-05-12 15:28:22,072][00161] Component RolloutWorker_w4 stopped!
[2023-05-12 15:28:22,092][22713] Loop rollout_proc4_evt_loop terminating...
[2023-05-12 15:28:22,102][22697] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001466_6004736.pth...
[2023-05-12 15:28:22,389][22697] Stopping LearnerWorker_p0...
[2023-05-12 15:28:22,390][22697] Loop learner_proc0_evt_loop terminating...
[2023-05-12 15:28:22,401][00161] Component LearnerWorker_p0 stopped!
[2023-05-12 15:28:22,403][00161] Waiting for process learner_proc0 to stop...
[2023-05-12 15:28:23,959][00161] Waiting for process inference_proc0-0 to join...
[2023-05-12 15:28:24,352][00161] Waiting for process rollout_proc0 to join...
[2023-05-12 15:28:25,962][00161] Waiting for process rollout_proc1 to join...
[2023-05-12 15:28:25,965][00161] Waiting for process rollout_proc2 to join...
[2023-05-12 15:28:25,967][00161] Waiting for process rollout_proc3 to join...
[2023-05-12 15:28:25,968][00161] Waiting for process rollout_proc4 to join...
[2023-05-12 15:28:25,969][00161] Waiting for process rollout_proc5 to join...
[2023-05-12 15:28:25,970][00161] Waiting for process rollout_proc6 to join...
[2023-05-12 15:28:25,971][00161] Waiting for process rollout_proc7 to join...
[2023-05-12 15:28:25,972][00161] Batcher 0 profile tree view:
batching: 14.1607, releasing_batches: 0.0143
[2023-05-12 15:28:25,973][00161] InferenceWorker_p0-w0 profile tree view:
wait_policy: 0.0091
wait_policy_total: 280.4199
update_model: 4.1678
weight_update: 0.0012
one_step: 0.0026
handle_policy_step: 289.6667
deserialize: 7.7935, stack: 1.6742, obs_to_device_normalize: 61.3258, forward: 145.5804, send_messages: 15.2365
prepare_outputs: 43.8332
to_cpu: 26.5021
[2023-05-12 15:28:25,975][00161] Learner 0 profile tree view:
misc: 0.0030, prepare_batch: 10.0330
train: 40.8455
epoch_init: 0.0040, minibatch_init: 0.0098, losses_postprocess: 0.2881, kl_divergence: 0.3692, after_optimizer: 1.9399
calculate_losses: 12.5988
losses_init: 0.0016, forward_head: 1.1112, bptt_initial: 7.7923, tail: 0.6108, advantages_returns: 0.1711, losses: 1.6312
bptt: 1.1033
bptt_forward_core: 1.0700
update: 25.2126
clip: 0.7803
[2023-05-12 15:28:25,976][00161] RolloutWorker_w0 profile tree view:
wait_for_trajectories: 0.1897, enqueue_policy_requests: 78.5061, env_step: 444.0168, overhead: 12.3809, complete_rollouts: 3.6582
save_policy_outputs: 11.4830
split_output_tensors: 5.4602
[2023-05-12 15:28:25,978][00161] RolloutWorker_w7 profile tree view:
wait_for_trajectories: 0.2509, enqueue_policy_requests: 75.6875, env_step: 440.2742, overhead: 12.5337, complete_rollouts: 3.9811
save_policy_outputs: 11.4313
split_output_tensors: 5.4628
[2023-05-12 15:28:25,979][00161] Loop Runner_EvtLoop terminating...
[2023-05-12 15:28:25,981][00161] Runner profile tree view:
main_loop: 615.9429
[2023-05-12 15:28:25,982][00161] Collected {0: 6004736}, FPS: 3245.2
[2023-05-12 15:28:35,343][00161] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
[2023-05-12 15:28:35,345][00161] Overriding arg 'num_workers' with value 1 passed from command line
[2023-05-12 15:28:35,347][00161] Adding new argument 'no_render'=True that is not in the saved config file!
[2023-05-12 15:28:35,350][00161] Adding new argument 'save_video'=True that is not in the saved config file!
[2023-05-12 15:28:35,352][00161] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
[2023-05-12 15:28:35,354][00161] Adding new argument 'video_name'=None that is not in the saved config file!
[2023-05-12 15:28:35,356][00161] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
[2023-05-12 15:28:35,357][00161] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
[2023-05-12 15:28:35,358][00161] Adding new argument 'push_to_hub'=False that is not in the saved config file!
[2023-05-12 15:28:35,359][00161] Adding new argument 'hf_repository'=None that is not in the saved config file!
[2023-05-12 15:28:35,361][00161] Adding new argument 'policy_index'=0 that is not in the saved config file!
[2023-05-12 15:28:35,362][00161] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
[2023-05-12 15:28:35,363][00161] Adding new argument 'train_script'=None that is not in the saved config file!
[2023-05-12 15:28:35,364][00161] Adding new argument 'enjoy_script'=None that is not in the saved config file!
[2023-05-12 15:28:35,366][00161] Using frameskip 1 and render_action_repeat=4 for evaluation
[2023-05-12 15:28:35,388][00161] RunningMeanStd input shape: (3, 72, 128)
[2023-05-12 15:28:35,389][00161] RunningMeanStd input shape: (1,)
[2023-05-12 15:28:35,408][00161] ConvEncoder: input_channels=3
[2023-05-12 15:28:35,443][00161] Conv encoder output size: 512
[2023-05-12 15:28:35,445][00161] Policy head output size: 512
[2023-05-12 15:28:35,464][00161] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001466_6004736.pth...
[2023-05-12 15:28:35,954][00161] Num frames 100...
[2023-05-12 15:28:36,085][00161] Num frames 200...
[2023-05-12 15:28:36,206][00161] Num frames 300...
[2023-05-12 15:28:36,328][00161] Num frames 400...
[2023-05-12 15:28:36,446][00161] Num frames 500...
[2023-05-12 15:28:36,568][00161] Num frames 600...
[2023-05-12 15:28:36,685][00161] Num frames 700...
[2023-05-12 15:28:36,803][00161] Num frames 800...
[2023-05-12 15:28:36,927][00161] Num frames 900...
[2023-05-12 15:28:37,080][00161] Num frames 1000...
[2023-05-12 15:28:37,251][00161] Num frames 1100...
[2023-05-12 15:28:37,417][00161] Num frames 1200...
[2023-05-12 15:28:37,576][00161] Num frames 1300...
[2023-05-12 15:28:37,746][00161] Num frames 1400...
[2023-05-12 15:28:37,929][00161] Num frames 1500...
[2023-05-12 15:28:38,093][00161] Avg episode rewards: #0: 42.680, true rewards: #0: 15.680
[2023-05-12 15:28:38,095][00161] Avg episode reward: 42.680, avg true_objective: 15.680
[2023-05-12 15:28:38,160][00161] Num frames 1600...
[2023-05-12 15:28:38,320][00161] Num frames 1700...
[2023-05-12 15:28:38,480][00161] Num frames 1800...
[2023-05-12 15:28:38,644][00161] Num frames 1900...
[2023-05-12 15:28:38,807][00161] Num frames 2000...
[2023-05-12 15:28:38,972][00161] Num frames 2100...
[2023-05-12 15:28:39,142][00161] Num frames 2200...
[2023-05-12 15:28:39,327][00161] Num frames 2300...
[2023-05-12 15:28:39,507][00161] Num frames 2400...
[2023-05-12 15:28:39,631][00161] Avg episode rewards: #0: 31.685, true rewards: #0: 12.185
[2023-05-12 15:28:39,633][00161] Avg episode reward: 31.685, avg true_objective: 12.185
[2023-05-12 15:28:39,738][00161] Num frames 2500...
[2023-05-12 15:28:39,909][00161] Num frames 2600...
[2023-05-12 15:28:40,076][00161] Num frames 2700...
[2023-05-12 15:28:40,248][00161] Num frames 2800...
[2023-05-12 15:28:40,416][00161] Num frames 2900...
[2023-05-12 15:28:40,591][00161] Num frames 3000...
[2023-05-12 15:28:40,759][00161] Num frames 3100...
[2023-05-12 15:28:40,948][00161] Avg episode rewards: #0: 25.243, true rewards: #0: 10.577
[2023-05-12 15:28:40,951][00161] Avg episode reward: 25.243, avg true_objective: 10.577
[2023-05-12 15:28:41,000][00161] Num frames 3200...
[2023-05-12 15:28:41,170][00161] Num frames 3300...
[2023-05-12 15:28:41,338][00161] Num frames 3400...
[2023-05-12 15:28:41,502][00161] Num frames 3500...
[2023-05-12 15:28:41,671][00161] Num frames 3600...
[2023-05-12 15:28:41,838][00161] Num frames 3700...
[2023-05-12 15:28:42,028][00161] Avg episode rewards: #0: 21.703, true rewards: #0: 9.452
[2023-05-12 15:28:42,030][00161] Avg episode reward: 21.703, avg true_objective: 9.452
[2023-05-12 15:28:42,055][00161] Num frames 3800...
[2023-05-12 15:28:42,170][00161] Num frames 3900...
[2023-05-12 15:28:42,294][00161] Num frames 4000...
[2023-05-12 15:28:42,422][00161] Num frames 4100...
[2023-05-12 15:28:42,545][00161] Num frames 4200...
[2023-05-12 15:28:42,661][00161] Num frames 4300...
[2023-05-12 15:28:42,778][00161] Num frames 4400...
[2023-05-12 15:28:42,896][00161] Num frames 4500...
[2023-05-12 15:28:43,014][00161] Num frames 4600...
[2023-05-12 15:28:43,138][00161] Num frames 4700...
[2023-05-12 15:28:43,262][00161] Num frames 4800...
[2023-05-12 15:28:43,380][00161] Num frames 4900...
[2023-05-12 15:28:43,501][00161] Num frames 5000...
[2023-05-12 15:28:43,618][00161] Num frames 5100...
[2023-05-12 15:28:43,736][00161] Num frames 5200...
[2023-05-12 15:28:43,856][00161] Num frames 5300...
[2023-05-12 15:28:43,973][00161] Num frames 5400...
[2023-05-12 15:28:44,089][00161] Num frames 5500...
[2023-05-12 15:28:44,205][00161] Num frames 5600...
[2023-05-12 15:28:44,325][00161] Num frames 5700...
[2023-05-12 15:28:44,450][00161] Num frames 5800...
[2023-05-12 15:28:44,600][00161] Avg episode rewards: #0: 29.162, true rewards: #0: 11.762
[2023-05-12 15:28:44,602][00161] Avg episode reward: 29.162, avg true_objective: 11.762
[2023-05-12 15:28:44,629][00161] Num frames 5900...
[2023-05-12 15:28:44,746][00161] Num frames 6000...
[2023-05-12 15:28:44,870][00161] Num frames 6100...
[2023-05-12 15:28:44,996][00161] Num frames 6200...
[2023-05-12 15:28:45,129][00161] Num frames 6300...
[2023-05-12 15:28:45,247][00161] Num frames 6400...
[2023-05-12 15:28:45,384][00161] Num frames 6500...
[2023-05-12 15:28:45,503][00161] Num frames 6600...
[2023-05-12 15:28:45,622][00161] Num frames 6700...
[2023-05-12 15:28:45,751][00161] Num frames 6800...
[2023-05-12 15:28:45,875][00161] Num frames 6900...
[2023-05-12 15:28:45,998][00161] Num frames 7000...
[2023-05-12 15:28:46,128][00161] Num frames 7100...
[2023-05-12 15:28:46,253][00161] Num frames 7200...
[2023-05-12 15:28:46,381][00161] Num frames 7300...
[2023-05-12 15:28:46,501][00161] Num frames 7400...
[2023-05-12 15:28:46,587][00161] Avg episode rewards: #0: 31.371, true rewards: #0: 12.372
[2023-05-12 15:28:46,589][00161] Avg episode reward: 31.371, avg true_objective: 12.372
[2023-05-12 15:28:46,683][00161] Num frames 7500...
[2023-05-12 15:28:46,803][00161] Num frames 7600...
[2023-05-12 15:28:46,926][00161] Num frames 7700...
[2023-05-12 15:28:47,047][00161] Num frames 7800...
[2023-05-12 15:28:47,162][00161] Num frames 7900...
[2023-05-12 15:28:47,296][00161] Avg episode rewards: #0: 27.953, true rewards: #0: 11.381
[2023-05-12 15:28:47,298][00161] Avg episode reward: 27.953, avg true_objective: 11.381
[2023-05-12 15:28:47,345][00161] Num frames 8000...
[2023-05-12 15:28:47,471][00161] Num frames 8100...
[2023-05-12 15:28:47,598][00161] Num frames 8200...
[2023-05-12 15:28:47,724][00161] Num frames 8300...
[2023-05-12 15:28:47,846][00161] Num frames 8400...
[2023-05-12 15:28:47,969][00161] Num frames 8500...
[2023-05-12 15:28:48,093][00161] Num frames 8600...
[2023-05-12 15:28:48,213][00161] Num frames 8700...
[2023-05-12 15:28:48,342][00161] Num frames 8800...
[2023-05-12 15:28:48,468][00161] Num frames 8900...
[2023-05-12 15:28:48,592][00161] Num frames 9000...
[2023-05-12 15:28:48,706][00161] Num frames 9100...
[2023-05-12 15:28:48,823][00161] Num frames 9200...
[2023-05-12 15:28:48,949][00161] Num frames 9300...
[2023-05-12 15:28:49,069][00161] Num frames 9400...
[2023-05-12 15:28:49,211][00161] Avg episode rewards: #0: 28.838, true rewards: #0: 11.839
[2023-05-12 15:28:49,213][00161] Avg episode reward: 28.838, avg true_objective: 11.839
[2023-05-12 15:28:49,250][00161] Num frames 9500...
[2023-05-12 15:28:49,382][00161] Num frames 9600...
[2023-05-12 15:28:49,504][00161] Num frames 9700...
[2023-05-12 15:28:49,621][00161] Num frames 9800...
[2023-05-12 15:28:49,742][00161] Num frames 9900...
[2023-05-12 15:28:49,865][00161] Num frames 10000...
[2023-05-12 15:28:50,012][00161] Avg episode rewards: #0: 27.088, true rewards: #0: 11.199
[2023-05-12 15:28:50,013][00161] Avg episode reward: 27.088, avg true_objective: 11.199
[2023-05-12 15:28:50,053][00161] Num frames 10100...
[2023-05-12 15:28:50,170][00161] Num frames 10200...
[2023-05-12 15:28:50,289][00161] Num frames 10300...
[2023-05-12 15:28:50,417][00161] Num frames 10400...
[2023-05-12 15:28:50,539][00161] Num frames 10500...
[2023-05-12 15:28:50,655][00161] Num frames 10600...
[2023-05-12 15:28:50,774][00161] Num frames 10700...
[2023-05-12 15:28:50,899][00161] Num frames 10800...
[2023-05-12 15:28:51,025][00161] Num frames 10900...
[2023-05-12 15:28:51,145][00161] Num frames 11000...
[2023-05-12 15:28:51,267][00161] Num frames 11100...
[2023-05-12 15:28:51,401][00161] Num frames 11200...
[2023-05-12 15:28:51,522][00161] Num frames 11300...
[2023-05-12 15:28:51,660][00161] Num frames 11400...
[2023-05-12 15:28:51,787][00161] Num frames 11500...
[2023-05-12 15:28:51,913][00161] Num frames 11600...
[2023-05-12 15:28:52,063][00161] Num frames 11700...
[2023-05-12 15:28:52,233][00161] Num frames 11800...
[2023-05-12 15:28:52,414][00161] Num frames 11900...
[2023-05-12 15:28:52,589][00161] Num frames 12000...
[2023-05-12 15:28:52,814][00161] Avg episode rewards: #0: 29.997, true rewards: #0: 12.097
[2023-05-12 15:28:52,821][00161] Avg episode reward: 29.997, avg true_objective: 12.097
[2023-05-12 15:28:52,829][00161] Num frames 12100...
[2023-05-12 15:30:09,178][00161] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
[2023-05-12 15:30:09,895][00161] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
[2023-05-12 15:30:09,897][00161] Overriding arg 'num_workers' with value 1 passed from command line
[2023-05-12 15:30:09,898][00161] Adding new argument 'no_render'=True that is not in the saved config file!
[2023-05-12 15:30:09,900][00161] Adding new argument 'save_video'=True that is not in the saved config file!
[2023-05-12 15:30:09,902][00161] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
[2023-05-12 15:30:09,903][00161] Adding new argument 'video_name'=None that is not in the saved config file!
[2023-05-12 15:30:09,905][00161] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
[2023-05-12 15:30:09,906][00161] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
[2023-05-12 15:30:09,908][00161] Adding new argument 'push_to_hub'=True that is not in the saved config file!
[2023-05-12 15:30:09,908][00161] Adding new argument 'hf_repository'='shreyansjain/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
[2023-05-12 15:30:09,909][00161] Adding new argument 'policy_index'=0 that is not in the saved config file!
[2023-05-12 15:30:09,910][00161] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
[2023-05-12 15:30:09,911][00161] Adding new argument 'train_script'=None that is not in the saved config file!
[2023-05-12 15:30:09,912][00161] Adding new argument 'enjoy_script'=None that is not in the saved config file!
[2023-05-12 15:30:09,913][00161] Using frameskip 1 and render_action_repeat=4 for evaluation
[2023-05-12 15:30:09,935][00161] RunningMeanStd input shape: (3, 72, 128)
[2023-05-12 15:30:09,938][00161] RunningMeanStd input shape: (1,)
[2023-05-12 15:30:09,955][00161] ConvEncoder: input_channels=3
[2023-05-12 15:30:10,009][00161] Conv encoder output size: 512
[2023-05-12 15:30:10,010][00161] Policy head output size: 512
[2023-05-12 15:30:10,036][00161] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001466_6004736.pth...
[2023-05-12 15:30:10,819][00161] Num frames 100...
[2023-05-12 15:30:11,002][00161] Num frames 200...
[2023-05-12 15:30:11,185][00161] Num frames 300...
[2023-05-12 15:30:11,264][00161] Avg episode rewards: #0: 3.080, true rewards: #0: 3.080
[2023-05-12 15:30:11,266][00161] Avg episode reward: 3.080, avg true_objective: 3.080
[2023-05-12 15:30:11,437][00161] Num frames 400...
[2023-05-12 15:30:11,626][00161] Num frames 500...
[2023-05-12 15:30:11,818][00161] Num frames 600...
[2023-05-12 15:30:12,019][00161] Num frames 700...
[2023-05-12 15:30:12,243][00161] Avg episode rewards: #0: 4.940, true rewards: #0: 3.940
[2023-05-12 15:30:12,246][00161] Avg episode reward: 4.940, avg true_objective: 3.940
[2023-05-12 15:30:12,273][00161] Num frames 800...
[2023-05-12 15:30:12,475][00161] Num frames 900...
[2023-05-12 15:30:12,676][00161] Num frames 1000...
[2023-05-12 15:30:12,874][00161] Num frames 1100...
[2023-05-12 15:30:13,065][00161] Num frames 1200...
[2023-05-12 15:30:13,267][00161] Num frames 1300...
[2023-05-12 15:30:13,387][00161] Avg episode rewards: #0: 6.107, true rewards: #0: 4.440
[2023-05-12 15:30:13,390][00161] Avg episode reward: 6.107, avg true_objective: 4.440
[2023-05-12 15:30:13,528][00161] Num frames 1400...
[2023-05-12 15:30:13,725][00161] Num frames 1500...
[2023-05-12 15:30:13,911][00161] Num frames 1600...
[2023-05-12 15:30:14,125][00161] Num frames 1700...
[2023-05-12 15:30:14,340][00161] Num frames 1800...
[2023-05-12 15:30:14,542][00161] Num frames 1900...
[2023-05-12 15:30:14,775][00161] Num frames 2000...
[2023-05-12 15:30:15,009][00161] Num frames 2100...
[2023-05-12 15:30:15,222][00161] Num frames 2200...
[2023-05-12 15:30:15,412][00161] Avg episode rewards: #0: 10.193, true rewards: #0: 5.692
[2023-05-12 15:30:15,414][00161] Avg episode reward: 10.193, avg true_objective: 5.692
[2023-05-12 15:30:15,463][00161] Num frames 2300...
[2023-05-12 15:30:15,670][00161] Num frames 2400...
[2023-05-12 15:30:15,883][00161] Num frames 2500...
[2023-05-12 15:30:16,093][00161] Num frames 2600...
[2023-05-12 15:30:16,312][00161] Num frames 2700...
[2023-05-12 15:30:16,527][00161] Num frames 2800...
[2023-05-12 15:30:16,738][00161] Num frames 2900...
[2023-05-12 15:30:16,950][00161] Num frames 3000...
[2023-05-12 15:30:17,158][00161] Num frames 3100...
[2023-05-12 15:30:17,353][00161] Num frames 3200...
[2023-05-12 15:30:17,566][00161] Num frames 3300...
[2023-05-12 15:30:17,630][00161] Avg episode rewards: #0: 12.802, true rewards: #0: 6.602
[2023-05-12 15:30:17,632][00161] Avg episode reward: 12.802, avg true_objective: 6.602
[2023-05-12 15:30:17,801][00161] Num frames 3400...
[2023-05-12 15:30:17,968][00161] Num frames 3500...
[2023-05-12 15:30:18,172][00161] Num frames 3600...
[2023-05-12 15:30:18,358][00161] Num frames 3700...
[2023-05-12 15:30:18,541][00161] Num frames 3800...
[2023-05-12 15:30:18,737][00161] Num frames 3900...
[2023-05-12 15:30:18,913][00161] Num frames 4000...
[2023-05-12 15:30:19,079][00161] Num frames 4100...
[2023-05-12 15:30:19,219][00161] Avg episode rewards: #0: 14.237, true rewards: #0: 6.903
[2023-05-12 15:30:19,221][00161] Avg episode reward: 14.237, avg true_objective: 6.903
[2023-05-12 15:30:19,316][00161] Num frames 4200...
[2023-05-12 15:30:19,480][00161] Num frames 4300...
[2023-05-12 15:30:19,648][00161] Num frames 4400...
[2023-05-12 15:30:19,816][00161] Num frames 4500...
[2023-05-12 15:30:19,978][00161] Num frames 4600...
[2023-05-12 15:30:20,135][00161] Num frames 4700...
[2023-05-12 15:30:20,307][00161] Num frames 4800...
[2023-05-12 15:30:20,454][00161] Num frames 4900...
[2023-05-12 15:30:20,574][00161] Num frames 5000...
[2023-05-12 15:30:20,690][00161] Num frames 5100...
[2023-05-12 15:30:20,804][00161] Num frames 5200...
[2023-05-12 15:30:20,969][00161] Avg episode rewards: #0: 15.849, true rewards: #0: 7.563
[2023-05-12 15:30:20,971][00161] Avg episode reward: 15.849, avg true_objective: 7.563
[2023-05-12 15:30:20,981][00161] Num frames 5300...
[2023-05-12 15:30:21,098][00161] Num frames 5400...
[2023-05-12 15:30:21,227][00161] Num frames 5500...
[2023-05-12 15:30:21,351][00161] Num frames 5600...
[2023-05-12 15:30:21,474][00161] Num frames 5700...
[2023-05-12 15:30:21,594][00161] Num frames 5800...
[2023-05-12 15:30:21,711][00161] Num frames 5900...
[2023-05-12 15:30:21,832][00161] Num frames 6000...
[2023-05-12 15:30:21,959][00161] Num frames 6100...
[2023-05-12 15:30:22,080][00161] Num frames 6200...
[2023-05-12 15:30:22,197][00161] Num frames 6300...
[2023-05-12 15:30:22,320][00161] Num frames 6400...
[2023-05-12 15:30:22,438][00161] Num frames 6500...
[2023-05-12 15:30:22,597][00161] Avg episode rewards: #0: 17.860, true rewards: #0: 8.235
[2023-05-12 15:30:22,599][00161] Avg episode reward: 17.860, avg true_objective: 8.235
[2023-05-12 15:30:22,616][00161] Num frames 6600...
[2023-05-12 15:30:22,735][00161] Num frames 6700...
[2023-05-12 15:30:22,860][00161] Num frames 6800...
[2023-05-12 15:30:22,993][00161] Num frames 6900...
[2023-05-12 15:30:23,115][00161] Num frames 7000...
[2023-05-12 15:30:23,231][00161] Num frames 7100...
[2023-05-12 15:30:23,355][00161] Num frames 7200...
[2023-05-12 15:30:23,476][00161] Num frames 7300...
[2023-05-12 15:30:23,597][00161] Num frames 7400...
[2023-05-12 15:30:23,713][00161] Num frames 7500...
[2023-05-12 15:30:23,832][00161] Num frames 7600...
[2023-05-12 15:30:23,950][00161] Num frames 7700...
[2023-05-12 15:30:24,067][00161] Num frames 7800...
[2023-05-12 15:30:24,184][00161] Num frames 7900...
[2023-05-12 15:30:24,311][00161] Num frames 8000...
[2023-05-12 15:30:24,429][00161] Num frames 8100...
[2023-05-12 15:30:24,552][00161] Num frames 8200...
[2023-05-12 15:30:24,672][00161] Num frames 8300...
[2023-05-12 15:30:24,793][00161] Num frames 8400...
[2023-05-12 15:30:24,863][00161] Avg episode rewards: #0: 21.012, true rewards: #0: 9.346
[2023-05-12 15:30:24,864][00161] Avg episode reward: 21.012, avg true_objective: 9.346
[2023-05-12 15:30:24,971][00161] Num frames 8500...
[2023-05-12 15:30:25,093][00161] Num frames 8600...
[2023-05-12 15:30:25,217][00161] Num frames 8700...
[2023-05-12 15:30:25,339][00161] Num frames 8800...
[2023-05-12 15:30:25,459][00161] Num frames 8900...
[2023-05-12 15:30:25,588][00161] Num frames 9000...
[2023-05-12 15:30:25,737][00161] Num frames 9100...
[2023-05-12 15:30:25,857][00161] Num frames 9200...
[2023-05-12 15:30:25,976][00161] Num frames 9300...
[2023-05-12 15:30:26,101][00161] Num frames 9400...
[2023-05-12 15:30:26,223][00161] Num frames 9500...
[2023-05-12 15:30:26,350][00161] Num frames 9600...
[2023-05-12 15:30:26,468][00161] Num frames 9700...
[2023-05-12 15:30:26,588][00161] Num frames 9800...
[2023-05-12 15:30:26,702][00161] Num frames 9900...
[2023-05-12 15:30:26,829][00161] Num frames 10000...
[2023-05-12 15:30:26,998][00161] Num frames 10100...
[2023-05-12 15:30:27,161][00161] Num frames 10200...
[2023-05-12 15:30:27,328][00161] Num frames 10300...
[2023-05-12 15:30:27,449][00161] Num frames 10400...
[2023-05-12 15:30:27,570][00161] Num frames 10500...
[2023-05-12 15:30:27,640][00161] Avg episode rewards: #0: 24.711, true rewards: #0: 10.511
[2023-05-12 15:30:27,642][00161] Avg episode reward: 24.711, avg true_objective: 10.511
[2023-05-12 15:31:33,775][00161] Replay video saved to /content/train_dir/default_experiment/replay.mp4!