Upload folder using huggingface_hub
.summary/0/events.out.tfevents.1688485318.e07d25fd0378 (ADDED)

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb791f51c742df8a1c9e16b6364c072572916cf73423a3f96a4b5dff483d3a28
+size 477121
```
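Each binary added in this commit is stored as a Git LFS pointer like the one above: the repository keeps only the `oid` (a SHA-256 digest of the payload) and the byte `size`. Below is a minimal sketch of checking a downloaded payload against its pointer, assuming both files are available locally; the `verify_lfs_pointer` helper is hypothetical, not part of any library:

```python
import hashlib

def verify_lfs_pointer(pointer_path: str, payload_path: str) -> bool:
    """Check a Git LFS pointer's oid/size against the actual payload file."""
    # Pointer files are "key value" lines: version, oid sha256:<hex>, size <bytes>.
    with open(pointer_path, encoding="utf-8") as f:
        fields = dict(line.split(" ", 1) for line in f.read().splitlines() if " " in line)
    expected_oid = fields["oid"].strip().removeprefix("sha256:")
    expected_size = int(fields["size"])

    # Stream the payload so large checkpoints don't have to fit in memory.
    sha = hashlib.sha256()
    size = 0
    with open(payload_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == expected_oid and size == expected_size
```

The same check applies to the three `.pth` checkpoints and `replay.mp4` below.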
README.md (CHANGED)

```diff
@@ -15,7 +15,7 @@ model-index:
       type: doom_health_gathering_supreme
     metrics:
     - type: mean_reward
-      value:
+      value: 9.20 +/- 6.90
       name: mean_reward
       verified: false
 ---
```
checkpoint_p0/best_000000923_3780608_reward_25.008.pth (ADDED)

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f99eb7ed85310bb1ed9425b70f9c9391aab4a651382a677d483ab8c9dffabc3d
+size 34928806
```
checkpoint_p0/checkpoint_000000874_3579904.pth (ADDED)

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32aa48de04dd532a12a43951fb5df18da0842821b98ec8cb3e5e1d6751d43e68
+size 34929220
```
checkpoint_p0/checkpoint_000000978_4005888.pth (ADDED)

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4cf53e5293626e176b0f248d7190abf8e64d8436f2862353319facf0fad5041e
+size 34929220
```
config.json (CHANGED)

```diff
@@ -65,7 +65,7 @@
     "summaries_use_frameskip": true,
     "heartbeat_interval": 20,
     "heartbeat_reporting_interval": 600,
-    "train_for_env_steps":
+    "train_for_env_steps": 4000000,
     "train_for_seconds": 10000000000,
     "save_every_sec": 120,
     "keep_checkpoints": 2,
```
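The only change to config.json is the environment-step budget, matching the "Overriding arg 'train_for_env_steps' with value 4000000 passed from command line" entry in sf_log.txt below. A minimal sketch of reading the value back, assuming the config sits at the path the log reports:

```python
import json

# Path taken from the training log below; adjust to wherever the repo is checked out.
with open("/content/train_dir/default_experiment/config.json") as f:
    cfg = json.load(f)

# After this commit the run is capped at 4M environment steps.
assert cfg["train_for_env_steps"] == 4000000
```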
replay.mp4 (CHANGED)

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3cd28f20ee6e27ecd0b5908b6cc0b7023ae49b0c216d117bc4d9ea50b2dc22ac
+size 17560860
```
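All LFS-backed artifacts in this commit (TensorBoard events, checkpoints, replay video) can be fetched in one call with `huggingface_hub`, the library named in the commit title. A minimal sketch; the repo id is taken from the push URL recorded in sf_log.txt below:

```python
from huggingface_hub import snapshot_download

# Repo id from the "model has been pushed" line in sf_log.txt.
local_dir = snapshot_download(
    repo_id="HilbertS/rl_course_vizdoom_health_gathering_supreme"
)
print(local_dir)  # local directory containing replay.mp4, checkpoint_p0/, etc.
```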
sf_log.txt (CHANGED)

```diff
@@ -2951,3 +2951,829 @@ main_loop: 287.1521
 [2023-07-04 15:40:08,709][18333] Avg episode rewards: #0: 19.761, true rewards: #0: 8.861
 [2023-07-04 15:40:08,711][18333] Avg episode reward: 19.761, avg true_objective: 8.861
 [2023-07-04 15:41:03,224][18333] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
2954 |
+
[2023-07-04 15:41:06,055][18333] The model has been pushed to https://huggingface.co/HilbertS/rl_course_vizdoom_health_gathering_supreme
|
2955 |
+
[2023-07-04 15:41:58,356][18333] Environment doom_basic already registered, overwriting...
|
2956 |
+
[2023-07-04 15:41:58,358][18333] Environment doom_two_colors_easy already registered, overwriting...
|
2957 |
+
[2023-07-04 15:41:58,360][18333] Environment doom_two_colors_hard already registered, overwriting...
|
2958 |
+
[2023-07-04 15:41:58,364][18333] Environment doom_dm already registered, overwriting...
|
2959 |
+
[2023-07-04 15:41:58,365][18333] Environment doom_dwango5 already registered, overwriting...
|
2960 |
+
[2023-07-04 15:41:58,367][18333] Environment doom_my_way_home_flat_actions already registered, overwriting...
|
2961 |
+
[2023-07-04 15:41:58,369][18333] Environment doom_defend_the_center_flat_actions already registered, overwriting...
|
2962 |
+
[2023-07-04 15:41:58,370][18333] Environment doom_my_way_home already registered, overwriting...
|
2963 |
+
[2023-07-04 15:41:58,371][18333] Environment doom_deadly_corridor already registered, overwriting...
|
2964 |
+
[2023-07-04 15:41:58,373][18333] Environment doom_defend_the_center already registered, overwriting...
|
2965 |
+
[2023-07-04 15:41:58,375][18333] Environment doom_defend_the_line already registered, overwriting...
|
2966 |
+
[2023-07-04 15:41:58,376][18333] Environment doom_health_gathering already registered, overwriting...
|
2967 |
+
[2023-07-04 15:41:58,377][18333] Environment doom_health_gathering_supreme already registered, overwriting...
|
2968 |
+
[2023-07-04 15:41:58,379][18333] Environment doom_battle already registered, overwriting...
|
2969 |
+
[2023-07-04 15:41:58,380][18333] Environment doom_battle2 already registered, overwriting...
|
2970 |
+
[2023-07-04 15:41:58,381][18333] Environment doom_duel_bots already registered, overwriting...
|
2971 |
+
[2023-07-04 15:41:58,382][18333] Environment doom_deathmatch_bots already registered, overwriting...
|
2972 |
+
[2023-07-04 15:41:58,384][18333] Environment doom_duel already registered, overwriting...
|
2973 |
+
[2023-07-04 15:41:58,385][18333] Environment doom_deathmatch_full already registered, overwriting...
|
2974 |
+
[2023-07-04 15:41:58,386][18333] Environment doom_benchmark already registered, overwriting...
|
2975 |
+
[2023-07-04 15:41:58,387][18333] register_encoder_factory: <function make_vizdoom_encoder at 0x7f3d79124820>
|
2976 |
+
[2023-07-04 15:41:58,425][18333] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
|
2977 |
+
[2023-07-04 15:41:58,426][18333] Overriding arg 'train_for_env_steps' with value 4000000 passed from command line
|
2978 |
+
[2023-07-04 15:41:58,433][18333] Experiment dir /content/train_dir/default_experiment already exists!
|
2979 |
+
[2023-07-04 15:41:58,435][18333] Resuming existing experiment from /content/train_dir/default_experiment...
|
2980 |
+
[2023-07-04 15:41:58,437][18333] Weights and Biases integration disabled
|
2981 |
+
[2023-07-04 15:41:58,443][18333] Environment var CUDA_VISIBLE_DEVICES is 0
|
2982 |
+
|
2983 |
+
[2023-07-04 15:41:59,883][18333] Starting experiment with the following configuration:
|
2984 |
+
help=False
|
2985 |
+
algo=APPO
|
2986 |
+
env=doom_health_gathering_supreme
|
2987 |
+
experiment=default_experiment
|
2988 |
+
train_dir=/content/train_dir
|
2989 |
+
restart_behavior=resume
|
2990 |
+
device=gpu
|
2991 |
+
seed=None
|
2992 |
+
num_policies=1
|
2993 |
+
async_rl=True
|
2994 |
+
serial_mode=False
|
2995 |
+
batched_sampling=False
|
2996 |
+
num_batches_to_accumulate=2
|
2997 |
+
worker_num_splits=2
|
2998 |
+
policy_workers_per_policy=1
|
2999 |
+
max_policy_lag=1000
|
3000 |
+
num_workers=8
|
3001 |
+
num_envs_per_worker=4
|
3002 |
+
batch_size=1024
|
3003 |
+
num_batches_per_epoch=1
|
3004 |
+
num_epochs=1
|
3005 |
+
rollout=32
|
3006 |
+
recurrence=32
|
3007 |
+
shuffle_minibatches=False
|
3008 |
+
gamma=0.99
|
3009 |
+
reward_scale=1.0
|
3010 |
+
reward_clip=1000.0
|
3011 |
+
value_bootstrap=False
|
3012 |
+
normalize_returns=True
|
3013 |
+
exploration_loss_coeff=0.001
|
3014 |
+
value_loss_coeff=0.5
|
3015 |
+
kl_loss_coeff=0.0
|
3016 |
+
exploration_loss=symmetric_kl
|
3017 |
+
gae_lambda=0.95
|
3018 |
+
ppo_clip_ratio=0.1
|
3019 |
+
ppo_clip_value=0.2
|
3020 |
+
with_vtrace=False
|
3021 |
+
vtrace_rho=1.0
|
3022 |
+
vtrace_c=1.0
|
3023 |
+
optimizer=adam
|
3024 |
+
adam_eps=1e-06
|
3025 |
+
adam_beta1=0.9
|
3026 |
+
adam_beta2=0.999
|
3027 |
+
max_grad_norm=4.0
|
3028 |
+
learning_rate=0.0001
|
3029 |
+
lr_schedule=constant
|
3030 |
+
lr_schedule_kl_threshold=0.008
|
3031 |
+
lr_adaptive_min=1e-06
|
3032 |
+
lr_adaptive_max=0.01
|
3033 |
+
obs_subtract_mean=0.0
|
3034 |
+
obs_scale=255.0
|
3035 |
+
normalize_input=True
|
3036 |
+
normalize_input_keys=None
|
3037 |
+
decorrelate_experience_max_seconds=0
|
3038 |
+
decorrelate_envs_on_one_worker=True
|
3039 |
+
actor_worker_gpus=[]
|
3040 |
+
set_workers_cpu_affinity=True
|
3041 |
+
force_envs_single_thread=False
|
3042 |
+
default_niceness=0
|
3043 |
+
log_to_file=True
|
3044 |
+
experiment_summaries_interval=10
|
3045 |
+
flush_summaries_interval=30
|
3046 |
+
stats_avg=100
|
3047 |
+
summaries_use_frameskip=True
|
3048 |
+
heartbeat_interval=20
|
3049 |
+
heartbeat_reporting_interval=600
|
3050 |
+
train_for_env_steps=4000000
|
3051 |
+
train_for_seconds=10000000000
|
3052 |
+
save_every_sec=120
|
3053 |
+
keep_checkpoints=2
|
3054 |
+
load_checkpoint_kind=latest
|
3055 |
+
save_milestones_sec=-1
|
3056 |
+
save_best_every_sec=5
|
3057 |
+
save_best_metric=reward
|
3058 |
+
save_best_after=100000
|
3059 |
+
benchmark=False
|
3060 |
+
encoder_mlp_layers=[512, 512]
|
3061 |
+
encoder_conv_architecture=convnet_simple
|
3062 |
+
encoder_conv_mlp_layers=[512]
|
3063 |
+
use_rnn=True
|
3064 |
+
rnn_size=512
|
3065 |
+
rnn_type=gru
|
3066 |
+
rnn_num_layers=1
|
3067 |
+
decoder_mlp_layers=[]
|
3068 |
+
nonlinearity=elu
|
3069 |
+
policy_initialization=orthogonal
|
3070 |
+
policy_init_gain=1.0
|
3071 |
+
actor_critic_share_weights=True
|
3072 |
+
adaptive_stddev=True
|
3073 |
+
continuous_tanh_scale=0.0
|
3074 |
+
initial_stddev=1.0
|
3075 |
+
use_env_info_cache=False
|
3076 |
+
env_gpu_actions=False
|
3077 |
+
env_gpu_observations=True
|
3078 |
+
env_frameskip=4
|
3079 |
+
env_framestack=1
|
3080 |
+
pixel_format=CHW
|
3081 |
+
use_record_episode_statistics=False
|
3082 |
+
with_wandb=False
|
3083 |
+
wandb_user=None
|
3084 |
+
wandb_project=sample_factory
|
3085 |
+
wandb_group=None
|
3086 |
+
wandb_job_type=SF
|
3087 |
+
wandb_tags=[]
|
3088 |
+
with_pbt=False
|
3089 |
+
pbt_mix_policies_in_one_env=True
|
3090 |
+
pbt_period_env_steps=5000000
|
3091 |
+
pbt_start_mutation=20000000
|
3092 |
+
pbt_replace_fraction=0.3
|
3093 |
+
pbt_mutation_rate=0.15
|
3094 |
+
pbt_replace_reward_gap=0.1
|
3095 |
+
pbt_replace_reward_gap_absolute=1e-06
|
3096 |
+
pbt_optimize_gamma=False
|
3097 |
+
pbt_target_objective=true_objective
|
3098 |
+
pbt_perturb_min=1.1
|
3099 |
+
pbt_perturb_max=1.5
|
3100 |
+
num_agents=-1
|
3101 |
+
num_humans=0
|
3102 |
+
num_bots=-1
|
3103 |
+
start_bot_difficulty=None
|
3104 |
+
timelimit=None
|
3105 |
+
res_w=128
|
3106 |
+
res_h=72
|
3107 |
+
wide_aspect_ratio=False
|
3108 |
+
eval_env_frameskip=1
|
3109 |
+
fps=35
|
3110 |
+
command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000
|
3111 |
+
cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 4000000}
|
3112 |
+
git_hash=unknown
|
3113 |
+
git_repo_name=not a git repository
|
3114 |
+
[2023-07-04 15:41:59,887][18333] Saving configuration to /content/train_dir/default_experiment/config.json...
|
3115 |
+
[2023-07-04 15:41:59,893][18333] Rollout worker 0 uses device cpu
|
3116 |
+
[2023-07-04 15:41:59,896][18333] Rollout worker 1 uses device cpu
|
3117 |
+
[2023-07-04 15:41:59,897][18333] Rollout worker 2 uses device cpu
|
3118 |
+
[2023-07-04 15:41:59,898][18333] Rollout worker 3 uses device cpu
|
3119 |
+
[2023-07-04 15:41:59,899][18333] Rollout worker 4 uses device cpu
|
3120 |
+
[2023-07-04 15:41:59,900][18333] Rollout worker 5 uses device cpu
|
3121 |
+
[2023-07-04 15:41:59,901][18333] Rollout worker 6 uses device cpu
|
3122 |
+
[2023-07-04 15:41:59,903][18333] Rollout worker 7 uses device cpu
|
3123 |
+
[2023-07-04 15:42:00,023][18333] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
3124 |
+
[2023-07-04 15:42:00,029][18333] InferenceWorker_p0-w0: min num requests: 2
|
3125 |
+
[2023-07-04 15:42:00,068][18333] Starting all processes...
|
3126 |
+
[2023-07-04 15:42:00,073][18333] Starting process learner_proc0
|
3127 |
+
[2023-07-04 15:42:00,141][18333] Starting all processes...
|
3128 |
+
[2023-07-04 15:42:00,150][18333] Starting process inference_proc0-0
|
3129 |
+
[2023-07-04 15:42:00,150][18333] Starting process rollout_proc0
|
3130 |
+
[2023-07-04 15:42:00,151][18333] Starting process rollout_proc1
|
3131 |
+
[2023-07-04 15:42:00,151][18333] Starting process rollout_proc2
|
3132 |
+
[2023-07-04 15:42:00,151][18333] Starting process rollout_proc3
|
3133 |
+
[2023-07-04 15:42:00,151][18333] Starting process rollout_proc4
|
3134 |
+
[2023-07-04 15:42:00,151][18333] Starting process rollout_proc5
|
3135 |
+
[2023-07-04 15:42:00,151][18333] Starting process rollout_proc6
|
3136 |
+
[2023-07-04 15:42:00,151][18333] Starting process rollout_proc7
|
3137 |
+
[2023-07-04 15:42:12,217][22133] Worker 1 uses CPU cores [1]
|
3138 |
+
[2023-07-04 15:42:12,582][22113] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
3139 |
+
[2023-07-04 15:42:12,585][22113] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
|
3140 |
+
[2023-07-04 15:42:12,593][22135] Worker 4 uses CPU cores [0]
|
3141 |
+
[2023-07-04 15:42:12,595][22126] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
3142 |
+
[2023-07-04 15:42:12,597][22126] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
|
3143 |
+
[2023-07-04 15:42:12,630][22113] Num visible devices: 1
|
3144 |
+
[2023-07-04 15:42:12,657][22126] Num visible devices: 1
|
3145 |
+
[2023-07-04 15:42:12,668][22113] Starting seed is not provided
|
3146 |
+
[2023-07-04 15:42:12,668][22113] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
3147 |
+
[2023-07-04 15:42:12,669][22113] Initializing actor-critic model on device cuda:0
|
3148 |
+
[2023-07-04 15:42:12,670][22113] RunningMeanStd input shape: (3, 72, 128)
|
3149 |
+
[2023-07-04 15:42:12,671][22113] RunningMeanStd input shape: (1,)
|
3150 |
+
[2023-07-04 15:42:12,685][22138] Worker 7 uses CPU cores [1]
|
3151 |
+
[2023-07-04 15:42:12,686][22132] Worker 3 uses CPU cores [1]
|
3152 |
+
[2023-07-04 15:42:12,691][22137] Worker 6 uses CPU cores [0]
|
3153 |
+
[2023-07-04 15:42:12,703][22113] ConvEncoder: input_channels=3
|
3154 |
+
[2023-07-04 15:42:12,717][22130] Worker 0 uses CPU cores [0]
|
3155 |
+
[2023-07-04 15:42:12,739][22136] Worker 5 uses CPU cores [1]
|
3156 |
+
[2023-07-04 15:42:12,742][22134] Worker 2 uses CPU cores [0]
|
3157 |
+
[2023-07-04 15:42:12,832][22113] Conv encoder output size: 512
|
3158 |
+
[2023-07-04 15:42:12,833][22113] Policy head output size: 512
|
3159 |
+
[2023-07-04 15:42:12,848][22113] Created Actor Critic model with architecture:
|
3160 |
+
[2023-07-04 15:42:12,848][22113] ActorCriticSharedWeights(
|
3161 |
+
(obs_normalizer): ObservationNormalizer(
|
3162 |
+
(running_mean_std): RunningMeanStdDictInPlace(
|
3163 |
+
(running_mean_std): ModuleDict(
|
3164 |
+
(obs): RunningMeanStdInPlace()
|
3165 |
+
)
|
3166 |
+
)
|
3167 |
+
)
|
3168 |
+
(returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
|
3169 |
+
(encoder): VizdoomEncoder(
|
3170 |
+
(basic_encoder): ConvEncoder(
|
3171 |
+
(enc): RecursiveScriptModule(
|
3172 |
+
original_name=ConvEncoderImpl
|
3173 |
+
(conv_head): RecursiveScriptModule(
|
3174 |
+
original_name=Sequential
|
3175 |
+
(0): RecursiveScriptModule(original_name=Conv2d)
|
3176 |
+
(1): RecursiveScriptModule(original_name=ELU)
|
3177 |
+
(2): RecursiveScriptModule(original_name=Conv2d)
|
3178 |
+
(3): RecursiveScriptModule(original_name=ELU)
|
3179 |
+
(4): RecursiveScriptModule(original_name=Conv2d)
|
3180 |
+
(5): RecursiveScriptModule(original_name=ELU)
|
3181 |
+
)
|
3182 |
+
(mlp_layers): RecursiveScriptModule(
|
3183 |
+
original_name=Sequential
|
3184 |
+
(0): RecursiveScriptModule(original_name=Linear)
|
3185 |
+
(1): RecursiveScriptModule(original_name=ELU)
|
3186 |
+
)
|
3187 |
+
)
|
3188 |
+
)
|
3189 |
+
)
|
3190 |
+
(core): ModelCoreRNN(
|
3191 |
+
(core): GRU(512, 512)
|
3192 |
+
)
|
3193 |
+
(decoder): MlpDecoder(
|
3194 |
+
(mlp): Identity()
|
3195 |
+
)
|
3196 |
+
(critic_linear): Linear(in_features=512, out_features=1, bias=True)
|
3197 |
+
(action_parameterization): ActionParameterizationDefault(
|
3198 |
+
(distribution_linear): Linear(in_features=512, out_features=5, bias=True)
|
3199 |
+
)
|
3200 |
+
)
|
3201 |
+
[2023-07-04 15:42:14,186][22113] Using optimizer <class 'torch.optim.adam.Adam'>
|
3202 |
+
[2023-07-04 15:42:14,187][22113] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000490_2007040.pth...
|
3203 |
+
[2023-07-04 15:42:14,218][22113] Loading model from checkpoint
|
3204 |
+
[2023-07-04 15:42:14,222][22113] Loaded experiment state at self.train_step=490, self.env_steps=2007040
|
3205 |
+
[2023-07-04 15:42:14,222][22113] Initialized policy 0 weights for model version 490
|
3206 |
+
[2023-07-04 15:42:14,225][22113] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
3207 |
+
[2023-07-04 15:42:14,238][22113] LearnerWorker_p0 finished initialization!
|
3208 |
+
[2023-07-04 15:42:14,414][22126] RunningMeanStd input shape: (3, 72, 128)
|
3209 |
+
[2023-07-04 15:42:14,415][22126] RunningMeanStd input shape: (1,)
|
3210 |
+
[2023-07-04 15:42:14,427][22126] ConvEncoder: input_channels=3
|
3211 |
+
[2023-07-04 15:42:14,530][22126] Conv encoder output size: 512
|
3212 |
+
[2023-07-04 15:42:14,530][22126] Policy head output size: 512
|
3213 |
+
[2023-07-04 15:42:15,731][18333] Inference worker 0-0 is ready!
|
3214 |
+
[2023-07-04 15:42:15,733][18333] All inference workers are ready! Signal rollout workers to start!
|
3215 |
+
[2023-07-04 15:42:15,833][22136] Doom resolution: 160x120, resize resolution: (128, 72)
|
3216 |
+
[2023-07-04 15:42:15,834][22138] Doom resolution: 160x120, resize resolution: (128, 72)
|
3217 |
+
[2023-07-04 15:42:15,836][22133] Doom resolution: 160x120, resize resolution: (128, 72)
|
3218 |
+
[2023-07-04 15:42:15,837][22132] Doom resolution: 160x120, resize resolution: (128, 72)
|
3219 |
+
[2023-07-04 15:42:15,829][22134] Doom resolution: 160x120, resize resolution: (128, 72)
|
3220 |
+
[2023-07-04 15:42:15,840][22135] Doom resolution: 160x120, resize resolution: (128, 72)
|
3221 |
+
[2023-07-04 15:42:15,837][22137] Doom resolution: 160x120, resize resolution: (128, 72)
|
3222 |
+
[2023-07-04 15:42:15,838][22130] Doom resolution: 160x120, resize resolution: (128, 72)
|
3223 |
+
[2023-07-04 15:42:16,364][22134] Decorrelating experience for 0 frames...
|
3224 |
+
[2023-07-04 15:42:16,789][22135] Decorrelating experience for 0 frames...
|
3225 |
+
[2023-07-04 15:42:17,197][22132] Decorrelating experience for 0 frames...
|
3226 |
+
[2023-07-04 15:42:17,199][22133] Decorrelating experience for 0 frames...
|
3227 |
+
[2023-07-04 15:42:17,201][22136] Decorrelating experience for 0 frames...
|
3228 |
+
[2023-07-04 15:42:17,205][22138] Decorrelating experience for 0 frames...
|
3229 |
+
[2023-07-04 15:42:18,447][18333] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 2007040. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
|
3230 |
+
[2023-07-04 15:42:18,787][22130] Decorrelating experience for 0 frames...
|
3231 |
+
[2023-07-04 15:42:18,793][22136] Decorrelating experience for 32 frames...
|
3232 |
+
[2023-07-04 15:42:18,799][22133] Decorrelating experience for 32 frames...
|
3233 |
+
[2023-07-04 15:42:18,804][22138] Decorrelating experience for 32 frames...
|
3234 |
+
[2023-07-04 15:42:18,819][22134] Decorrelating experience for 32 frames...
|
3235 |
+
[2023-07-04 15:42:18,872][22135] Decorrelating experience for 32 frames...
|
3236 |
+
[2023-07-04 15:42:20,014][18333] Heartbeat connected on Batcher_0
|
3237 |
+
[2023-07-04 15:42:20,019][18333] Heartbeat connected on LearnerWorker_p0
|
3238 |
+
[2023-07-04 15:42:20,073][18333] Heartbeat connected on InferenceWorker_p0-w0
|
3239 |
+
[2023-07-04 15:42:20,499][22137] Decorrelating experience for 0 frames...
|
3240 |
+
[2023-07-04 15:42:20,531][22130] Decorrelating experience for 32 frames...
|
3241 |
+
[2023-07-04 15:42:20,759][22135] Decorrelating experience for 64 frames...
|
3242 |
+
[2023-07-04 15:42:21,358][22132] Decorrelating experience for 32 frames...
|
3243 |
+
[2023-07-04 15:42:21,673][22136] Decorrelating experience for 64 frames...
|
3244 |
+
[2023-07-04 15:42:21,685][22133] Decorrelating experience for 64 frames...
|
3245 |
+
[2023-07-04 15:42:22,606][22138] Decorrelating experience for 64 frames...
|
3246 |
+
[2023-07-04 15:42:22,860][22130] Decorrelating experience for 64 frames...
|
3247 |
+
[2023-07-04 15:42:22,874][22137] Decorrelating experience for 32 frames...
|
3248 |
+
[2023-07-04 15:42:23,443][18333] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 2007040. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
|
3249 |
+
[2023-07-04 15:42:23,493][22136] Decorrelating experience for 96 frames...
|
3250 |
+
[2023-07-04 15:42:23,661][22134] Decorrelating experience for 64 frames...
|
3251 |
+
[2023-07-04 15:42:23,829][18333] Heartbeat connected on RolloutWorker_w5
|
3252 |
+
[2023-07-04 15:42:24,876][22132] Decorrelating experience for 64 frames...
|
3253 |
+
[2023-07-04 15:42:25,138][22138] Decorrelating experience for 96 frames...
|
3254 |
+
[2023-07-04 15:42:25,516][22137] Decorrelating experience for 64 frames...
|
3255 |
+
[2023-07-04 15:42:25,520][18333] Heartbeat connected on RolloutWorker_w7
|
3256 |
+
[2023-07-04 15:42:26,625][22130] Decorrelating experience for 96 frames...
|
3257 |
+
[2023-07-04 15:42:27,023][22135] Decorrelating experience for 96 frames...
|
3258 |
+
[2023-07-04 15:42:27,323][18333] Heartbeat connected on RolloutWorker_w0
|
3259 |
+
[2023-07-04 15:42:27,403][22134] Decorrelating experience for 96 frames...
|
3260 |
+
[2023-07-04 15:42:27,426][22133] Decorrelating experience for 96 frames...
|
3261 |
+
[2023-07-04 15:42:27,727][18333] Heartbeat connected on RolloutWorker_w4
|
3262 |
+
[2023-07-04 15:42:27,884][18333] Heartbeat connected on RolloutWorker_w2
|
3263 |
+
[2023-07-04 15:42:28,014][18333] Heartbeat connected on RolloutWorker_w1
|
3264 |
+
[2023-07-04 15:42:28,444][18333] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 2007040. Throughput: 0: 104.4. Samples: 1044. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
|
3265 |
+
[2023-07-04 15:42:28,450][18333] Avg episode reward: [(0, '5.920')]
|
3266 |
+
[2023-07-04 15:42:30,105][22137] Decorrelating experience for 96 frames...
|
3267 |
+
[2023-07-04 15:42:30,522][22113] Signal inference workers to stop experience collection...
|
3268 |
+
[2023-07-04 15:42:30,544][22126] InferenceWorker_p0-w0: stopping experience collection
|
3269 |
+
[2023-07-04 15:42:30,576][18333] Heartbeat connected on RolloutWorker_w6
|
3270 |
+
[2023-07-04 15:42:30,706][22132] Decorrelating experience for 96 frames...
|
3271 |
+
[2023-07-04 15:42:30,763][18333] Heartbeat connected on RolloutWorker_w3
|
3272 |
+
[2023-07-04 15:42:31,213][22113] Signal inference workers to resume experience collection...
|
3273 |
+
[2023-07-04 15:42:31,213][22126] InferenceWorker_p0-w0: resuming experience collection
|
3274 |
+
[2023-07-04 15:42:33,444][18333] Fps is (10 sec: 1228.8, 60 sec: 819.4, 300 sec: 819.4). Total num frames: 2019328. Throughput: 0: 161.6. Samples: 2424. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
|
3275 |
+
[2023-07-04 15:42:33,448][18333] Avg episode reward: [(0, '6.131')]
|
3276 |
+
[2023-07-04 15:42:38,444][18333] Fps is (10 sec: 3277.0, 60 sec: 1638.7, 300 sec: 1638.7). Total num frames: 2039808. Throughput: 0: 379.1. Samples: 7580. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
3277 |
+
[2023-07-04 15:42:38,446][18333] Avg episode reward: [(0, '10.472')]
|
3278 |
+
[2023-07-04 15:42:39,885][22126] Updated weights for policy 0, policy_version 500 (0.0364)
|
3279 |
+
[2023-07-04 15:42:43,444][18333] Fps is (10 sec: 3686.4, 60 sec: 1966.4, 300 sec: 1966.4). Total num frames: 2056192. Throughput: 0: 513.3. Samples: 12830. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
3280 |
+
[2023-07-04 15:42:43,448][18333] Avg episode reward: [(0, '12.881')]
|
3281 |
+
[2023-07-04 15:42:48,444][18333] Fps is (10 sec: 2867.2, 60 sec: 2048.3, 300 sec: 2048.3). Total num frames: 2068480. Throughput: 0: 491.9. Samples: 14756. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
3282 |
+
[2023-07-04 15:42:48,454][18333] Avg episode reward: [(0, '15.054')]
|
3283 |
+
[2023-07-04 15:42:53,443][18333] Fps is (10 sec: 2867.2, 60 sec: 2223.8, 300 sec: 2223.8). Total num frames: 2084864. Throughput: 0: 537.4. Samples: 18808. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
3284 |
+
[2023-07-04 15:42:53,446][18333] Avg episode reward: [(0, '15.502')]
|
3285 |
+
[2023-07-04 15:42:54,028][22126] Updated weights for policy 0, policy_version 510 (0.0012)
|
3286 |
+
[2023-07-04 15:42:58,444][18333] Fps is (10 sec: 3686.4, 60 sec: 2457.8, 300 sec: 2457.8). Total num frames: 2105344. Throughput: 0: 631.0. Samples: 25236. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
3287 |
+
[2023-07-04 15:42:58,450][18333] Avg episode reward: [(0, '16.713')]
|
3288 |
+
[2023-07-04 15:43:03,444][18333] Fps is (10 sec: 4095.9, 60 sec: 2639.9, 300 sec: 2639.9). Total num frames: 2125824. Throughput: 0: 636.5. Samples: 28642. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
3289 |
+
[2023-07-04 15:43:03,447][18333] Avg episode reward: [(0, '20.677')]
|
3290 |
+
[2023-07-04 15:43:03,667][22126] Updated weights for policy 0, policy_version 520 (0.0018)
|
3291 |
+
[2023-07-04 15:43:08,444][18333] Fps is (10 sec: 3686.4, 60 sec: 2703.6, 300 sec: 2703.6). Total num frames: 2142208. Throughput: 0: 738.4. Samples: 33228. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
|
3292 |
+
[2023-07-04 15:43:08,455][18333] Avg episode reward: [(0, '20.643')]
|
3293 |
+
[2023-07-04 15:43:13,444][18333] Fps is (10 sec: 2867.2, 60 sec: 2681.2, 300 sec: 2681.2). Total num frames: 2154496. Throughput: 0: 811.4. Samples: 37556. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
|
3294 |
+
[2023-07-04 15:43:13,449][18333] Avg episode reward: [(0, '20.266')]
|
3295 |
+
[2023-07-04 15:43:16,601][22126] Updated weights for policy 0, policy_version 530 (0.0027)
|
3296 |
+
[2023-07-04 15:43:18,444][18333] Fps is (10 sec: 3276.8, 60 sec: 2799.1, 300 sec: 2799.1). Total num frames: 2174976. Throughput: 0: 844.5. Samples: 40426. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
3297 |
+
[2023-07-04 15:43:18,446][18333] Avg episode reward: [(0, '19.739')]
|
3298 |
+
[2023-07-04 15:43:23,449][18333] Fps is (10 sec: 4503.0, 60 sec: 3208.2, 300 sec: 2961.6). Total num frames: 2199552. Throughput: 0: 876.5. Samples: 47026. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
3299 |
+
[2023-07-04 15:43:23,456][18333] Avg episode reward: [(0, '19.858')]
|
3300 |
+
[2023-07-04 15:43:26,881][22126] Updated weights for policy 0, policy_version 540 (0.0017)
|
3301 |
+
[2023-07-04 15:43:28,444][18333] Fps is (10 sec: 4096.0, 60 sec: 3481.6, 300 sec: 2984.4). Total num frames: 2215936. Throughput: 0: 874.7. Samples: 52192. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
3302 |
+
[2023-07-04 15:43:28,447][18333] Avg episode reward: [(0, '18.952')]
|
3303 |
+
[2023-07-04 15:43:33,444][18333] Fps is (10 sec: 2868.7, 60 sec: 3481.6, 300 sec: 2949.2). Total num frames: 2228224. Throughput: 0: 878.7. Samples: 54300. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
3304 |
+
[2023-07-04 15:43:33,449][18333] Avg episode reward: [(0, '18.617')]
|
3305 |
+
[2023-07-04 15:43:38,444][18333] Fps is (10 sec: 2867.1, 60 sec: 3413.3, 300 sec: 2969.7). Total num frames: 2244608. Throughput: 0: 883.5. Samples: 58564. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
3306 |
+
[2023-07-04 15:43:38,446][18333] Avg episode reward: [(0, '19.696')]
|
3307 |
+
[2023-07-04 15:43:39,910][22126] Updated weights for policy 0, policy_version 550 (0.0015)
|
3308 |
+
[2023-07-04 15:43:43,445][18333] Fps is (10 sec: 3686.3, 60 sec: 3481.5, 300 sec: 3036.0). Total num frames: 2265088. Throughput: 0: 888.3. Samples: 65212. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
3309 |
+
[2023-07-04 15:43:43,450][18333] Avg episode reward: [(0, '20.749')]
|
3310 |
+
[2023-07-04 15:43:48,448][18333] Fps is (10 sec: 4094.1, 60 sec: 3617.9, 300 sec: 3094.7). Total num frames: 2285568. Throughput: 0: 886.9. Samples: 68558. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
|
3311 |
+
[2023-07-04 15:43:48,451][18333] Avg episode reward: [(0, '20.008')]
|
3312 |
+
[2023-07-04 15:43:50,566][22126] Updated weights for policy 0, policy_version 560 (0.0015)
|
3313 |
+
[2023-07-04 15:43:53,444][18333] Fps is (10 sec: 3277.2, 60 sec: 3549.9, 300 sec: 3061.3). Total num frames: 2297856. Throughput: 0: 883.6. Samples: 72990. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
3314 |
+
[2023-07-04 15:43:53,450][18333] Avg episode reward: [(0, '20.912')]
|
3315 |
+
[2023-07-04 15:43:58,444][18333] Fps is (10 sec: 2868.5, 60 sec: 3481.6, 300 sec: 3072.1). Total num frames: 2314240. Throughput: 0: 878.1. Samples: 77072. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
3316 |
+
[2023-07-04 15:43:58,453][18333] Avg episode reward: [(0, '21.782')]
|
3317 |
+
[2023-07-04 15:43:58,465][22113] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000565_2314240.pth...
|
3318 |
+
[2023-07-04 15:43:58,661][22113] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000448_1835008.pth
|
3319 |
+
[2023-07-04 15:43:58,681][22113] Saving new best policy, reward=21.782!
|
3320 |
+
[2023-07-04 15:44:03,276][22126] Updated weights for policy 0, policy_version 570 (0.0027)
|
3321 |
+
[2023-07-04 15:44:03,443][18333] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3120.9). Total num frames: 2334720. Throughput: 0: 873.0. Samples: 79712. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
3322 |
+
[2023-07-04 15:44:03,450][18333] Avg episode reward: [(0, '21.747')]
|
3323 |
+
[2023-07-04 15:44:08,444][18333] Fps is (10 sec: 4096.1, 60 sec: 3549.9, 300 sec: 3165.2). Total num frames: 2355200. Throughput: 0: 876.9. Samples: 86482. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
3324 |
+
[2023-07-04 15:44:08,446][18333] Avg episode reward: [(0, '22.334')]
|
3325 |
+
[2023-07-04 15:44:08,461][22113] Saving new best policy, reward=22.334!
|
3326 |
+
[2023-07-04 15:44:13,444][18333] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3170.1). Total num frames: 2371584. Throughput: 0: 877.7. Samples: 91688. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
3327 |
+
[2023-07-04 15:44:13,447][18333] Avg episode reward: [(0, '22.763')]
|
3328 |
+
[2023-07-04 15:44:13,454][22113] Saving new best policy, reward=22.763!
|
3329 |
+
[2023-07-04 15:44:14,147][22126] Updated weights for policy 0, policy_version 580 (0.0021)
|
3330 |
+
[2023-07-04 15:44:18,444][18333] Fps is (10 sec: 2867.0, 60 sec: 3481.6, 300 sec: 3140.3). Total num frames: 2383872. Throughput: 0: 873.9. Samples: 93626. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
3331 |
+
[2023-07-04 15:44:18,448][18333] Avg episode reward: [(0, '23.183')]
|
3332 |
+
[2023-07-04 15:44:18,460][22113] Saving new best policy, reward=23.183!
|
3333 |
+
[2023-07-04 15:44:23,444][18333] Fps is (10 sec: 2867.2, 60 sec: 3345.4, 300 sec: 3145.8). Total num frames: 2400256. Throughput: 0: 870.7. Samples: 97744. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
3334 |
+
[2023-07-04 15:44:23,446][18333] Avg episode reward: [(0, '21.347')]
|
3335 |
+
[2023-07-04 15:44:26,586][22126] Updated weights for policy 0, policy_version 590 (0.0015)
|
3336 |
+
[2023-07-04 15:44:28,444][18333] Fps is (10 sec: 3686.6, 60 sec: 3413.3, 300 sec: 3182.4). Total num frames: 2420736. Throughput: 0: 869.7. Samples: 104346. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
3337 |
+
[2023-07-04 15:44:28,445][18333] Avg episode reward: [(0, '21.713')]
|
3338 |
+
[2023-07-04 15:44:33,444][18333] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3216.2). Total num frames: 2441216. Throughput: 0: 868.1. Samples: 107620. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
3339 |
+
[2023-07-04 15:44:33,446][18333] Avg episode reward: [(0, '20.100')]
|
3340 |
+
[2023-07-04 15:44:38,163][22126] Updated weights for policy 0, policy_version 600 (0.0012)
|
3341 |
+
[2023-07-04 15:44:38,445][18333] Fps is (10 sec: 3686.0, 60 sec: 3549.8, 300 sec: 3218.3). Total num frames: 2457600. Throughput: 0: 866.4. Samples: 111978. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
3342 |
+
[2023-07-04 15:44:38,454][18333] Avg episode reward: [(0, '20.039')]
|
3343 |
+
[2023-07-04 15:44:43,444][18333] Fps is (10 sec: 2867.0, 60 sec: 3413.4, 300 sec: 3192.1). Total num frames: 2469888. Throughput: 0: 870.0. Samples: 116222. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
3344 |
+
[2023-07-04 15:44:43,449][18333] Avg episode reward: [(0, '19.196')]
|
3345 |
+
[2023-07-04 15:44:48,443][18333] Fps is (10 sec: 3686.8, 60 sec: 3481.9, 300 sec: 3249.6). Total num frames: 2494464. Throughput: 0: 875.0. Samples: 119086. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
|
3346 |
+
[2023-07-04 15:44:48,446][18333] Avg episode reward: [(0, '19.491')]
|
3347 |
+
[2023-07-04 15:44:49,316][22126] Updated weights for policy 0, policy_version 610 (0.0022)
|
3348 |
+
[2023-07-04 15:44:53,444][18333] Fps is (10 sec: 4505.8, 60 sec: 3618.1, 300 sec: 3276.9). Total num frames: 2514944. Throughput: 0: 873.4. Samples: 125786. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
|
3349 |
+
[2023-07-04 15:44:53,446][18333] Avg episode reward: [(0, '19.441')]
|
3350 |
+
[2023-07-04 15:44:58,444][18333] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3251.3). Total num frames: 2527232. Throughput: 0: 868.0. Samples: 130746. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
|
3351 |
+
[2023-07-04 15:44:58,446][18333] Avg episode reward: [(0, '20.312')]
|
3352 |
+
[2023-07-04 15:45:01,652][22126] Updated weights for policy 0, policy_version 620 (0.0012)
|
3353 |
+
[2023-07-04 15:45:03,444][18333] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3252.1). Total num frames: 2543616. Throughput: 0: 870.8. Samples: 132812. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
|
3354 |
+
[2023-07-04 15:45:03,446][18333] Avg episode reward: [(0, '20.249')]
|
3355 |
+
[2023-07-04 15:45:08,444][18333] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 3252.8). Total num frames: 2560000. Throughput: 0: 875.5. Samples: 137142. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
|
3356 |
+
[2023-07-04 15:45:08,446][18333] Avg episode reward: [(0, '19.822')]
|
3357 |
+
[2023-07-04 15:45:12,866][22126] Updated weights for policy 0, policy_version 630 (0.0017)
|
3358 |
+
[2023-07-04 15:45:13,444][18333] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3276.9). Total num frames: 2580480. Throughput: 0: 875.6. Samples: 143748. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
|
3359 |
+
[2023-07-04 15:45:13,446][18333] Avg episode reward: [(0, '20.569')]
|
3360 |
+
[2023-07-04 15:45:18,444][18333] Fps is (10 sec: 4096.0, 60 sec: 3618.2, 300 sec: 3299.6). Total num frames: 2600960. Throughput: 0: 875.8. Samples: 147032. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
3361 |
+
[2023-07-04 15:45:18,446][18333] Avg episode reward: [(0, '21.334')]
|
3362 |
+
[2023-07-04 15:45:23,445][18333] Fps is (10 sec: 3276.2, 60 sec: 3549.8, 300 sec: 3276.8). Total num frames: 2613248. Throughput: 0: 881.4. Samples: 151640. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
3363 |
+
[2023-07-04 15:45:23,447][18333] Avg episode reward: [(0, '21.972')]
|
3364 |
+
[2023-07-04 15:45:25,316][22126] Updated weights for policy 0, policy_version 640 (0.0026)
|
3365 |
+
[2023-07-04 15:45:28,444][18333] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3276.9). Total num frames: 2629632. Throughput: 0: 877.9. Samples: 155728. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
3366 |
+
[2023-07-04 15:45:28,447][18333] Avg episode reward: [(0, '21.312')]
|
3367 |
+
[2023-07-04 15:45:33,444][18333] Fps is (10 sec: 3687.0, 60 sec: 3481.6, 300 sec: 3297.9). Total num frames: 2650112. Throughput: 0: 875.6. Samples: 158490. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
3368 |
+
[2023-07-04 15:45:33,446][18333] Avg episode reward: [(0, '21.985')]
|
3369 |
+
[2023-07-04 15:45:36,100][22126] Updated weights for policy 0, policy_version 650 (0.0019)
|
3370 |
+
[2023-07-04 15:45:38,444][18333] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3317.8). Total num frames: 2670592. Throughput: 0: 874.2. Samples: 165124. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
3371 |
+
[2023-07-04 15:45:38,446][18333] Avg episode reward: [(0, '21.232')]
|
3372 |
+
[2023-07-04 15:45:43,446][18333] Fps is (10 sec: 3685.4, 60 sec: 3618.0, 300 sec: 3316.8). Total num frames: 2686976. Throughput: 0: 876.4. Samples: 170188. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
3373 |
+
[2023-07-04 15:45:43,449][18333] Avg episode reward: [(0, '20.670')]
|
3374 |
+
[2023-07-04 15:45:48,444][18333] Fps is (10 sec: 2867.1, 60 sec: 3413.3, 300 sec: 3296.4). Total num frames: 2699264. Throughput: 0: 877.1. Samples: 172282. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
3375 |
+
[2023-07-04 15:45:48,449][18333] Avg episode reward: [(0, '19.693')]
|
3376 |
+
[2023-07-04 15:45:49,212][22126] Updated weights for policy 0, policy_version 660 (0.0027)
|
3377 |
+
[2023-07-04 15:45:53,444][18333] Fps is (10 sec: 2867.9, 60 sec: 3345.1, 300 sec: 3295.9). Total num frames: 2715648. Throughput: 0: 876.9. Samples: 176604. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
3378 |
+
[2023-07-04 15:45:53,446][18333] Avg episode reward: [(0, '18.681')]
|
3379 |
+
[2023-07-04 15:45:58,444][18333] Fps is (10 sec: 4096.1, 60 sec: 3549.9, 300 sec: 3332.7). Total num frames: 2740224. Throughput: 0: 877.8. Samples: 183250. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
3380 |
+
[2023-07-04 15:45:58,453][18333] Avg episode reward: [(0, '17.263')]
|
3381 |
+
[2023-07-04 15:45:58,465][22113] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000669_2740224.pth...
|
3382 |
+
[2023-07-04 15:45:58,593][22113] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000490_2007040.pth
|
3383 |
+
[2023-07-04 15:45:59,224][22126] Updated weights for policy 0, policy_version 670 (0.0013)
|
3384 |
+
[2023-07-04 15:46:03,446][18333] Fps is (10 sec: 4094.9, 60 sec: 3549.7, 300 sec: 3331.4). Total num frames: 2756608. Throughput: 0: 878.6. Samples: 186570. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
3385 |
+
[2023-07-04 15:46:03,451][18333] Avg episode reward: [(0, '17.009')]
|
3386 |
+
[2023-07-04 15:46:08,443][18333] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3330.3). Total num frames: 2772992. Throughput: 0: 872.7. Samples: 190908. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
3387 |
+
[2023-07-04 15:46:08,445][18333] Avg episode reward: [(0, '17.890')]
|
3388 |
+
[2023-07-04 15:46:12,802][22126] Updated weights for policy 0, policy_version 680 (0.0017)
|
3389 |
+
[2023-07-04 15:46:13,444][18333] Fps is (10 sec: 2868.0, 60 sec: 3413.3, 300 sec: 3311.7). Total num frames: 2785280. Throughput: 0: 874.4. Samples: 195076. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
3390 |
+
[2023-07-04 15:46:13,447][18333] Avg episode reward: [(0, '18.755')]
|
3391 |
+
[2023-07-04 15:46:18,444][18333] Fps is (10 sec: 3276.7, 60 sec: 3413.3, 300 sec: 3328.0). Total num frames: 2805760. Throughput: 0: 880.7. Samples: 198124. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
3392 |
+
[2023-07-04 15:46:18,451][18333] Avg episode reward: [(0, '19.946')]
|
3393 |
+
[2023-07-04 15:46:22,319][22126] Updated weights for policy 0, policy_version 690 (0.0015)
|
3394 |
+
[2023-07-04 15:46:23,444][18333] Fps is (10 sec: 4505.7, 60 sec: 3618.2, 300 sec: 3360.4). Total num frames: 2830336. Throughput: 0: 881.1. Samples: 204772. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
3395 |
+
[2023-07-04 15:46:23,446][18333] Avg episode reward: [(0, '21.407')]
|
3396 |
+
[2023-07-04 15:46:28,444][18333] Fps is (10 sec: 3686.5, 60 sec: 3549.9, 300 sec: 3342.4). Total num frames: 2842624. Throughput: 0: 875.4. Samples: 209578. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
3397 |
+
[2023-07-04 15:46:28,446][18333] Avg episode reward: [(0, '21.690')]
|
3398 |
+
[2023-07-04 15:46:33,444][18333] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3341.1). Total num frames: 2859008. Throughput: 0: 875.0. Samples: 211656. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
3399 |
+
[2023-07-04 15:46:33,446][18333] Avg episode reward: [(0, '23.014')]
|
3400 |
+
[2023-07-04 15:46:36,290][22126] Updated weights for policy 0, policy_version 700 (0.0014)
|
3401 |
+
[2023-07-04 15:46:38,444][18333] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 3339.9). Total num frames: 2875392. Throughput: 0: 881.8. Samples: 216284. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
3402 |
+
[2023-07-04 15:46:38,449][18333] Avg episode reward: [(0, '22.682')]
|
3403 |
+
[2023-07-04 15:46:43,443][18333] Fps is (10 sec: 3686.4, 60 sec: 3481.8, 300 sec: 3354.1). Total num frames: 2895872. Throughput: 0: 880.4. Samples: 222870. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
3404 |
+
[2023-07-04 15:46:43,447][18333] Avg episode reward: [(0, '23.860')]
|
3405 |
+
[2023-07-04 15:46:43,454][22113] Saving new best policy, reward=23.860!
|
3406 |
+
[2023-07-04 15:46:45,418][22126] Updated weights for policy 0, policy_version 710 (0.0012)
|
3407 |
+
[2023-07-04 15:46:48,444][18333] Fps is (10 sec: 4096.0, 60 sec: 3618.2, 300 sec: 3367.9). Total num frames: 2916352. Throughput: 0: 876.8. Samples: 226024. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
3408 |
+
[2023-07-04 15:46:48,446][18333] Avg episode reward: [(0, '24.045')]
|
3409 |
+
[2023-07-04 15:46:48,452][22113] Saving new best policy, reward=24.045!
|
3410 |
+
[2023-07-04 15:46:53,444][18333] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3351.3). Total num frames: 2928640. Throughput: 0: 873.4. Samples: 230210. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
3411 |
+
[2023-07-04 15:46:53,446][18333] Avg episode reward: [(0, '23.673')]
|
3412 |
+
[2023-07-04 15:46:58,444][18333] Fps is (10 sec: 2867.1, 60 sec: 3413.3, 300 sec: 3350.0). Total num frames: 2945024. Throughput: 0: 874.7. Samples: 234436. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
|
3413 |
+
[2023-07-04 15:46:58,449][18333] Avg episode reward: [(0, '23.703')]
|
3414 |
+
[2023-07-04 15:46:59,263][22126] Updated weights for policy 0, policy_version 720 (0.0019)
|
3415 |
+
[2023-07-04 15:47:03,444][18333] Fps is (10 sec: 3686.4, 60 sec: 3481.8, 300 sec: 3363.1). Total num frames: 2965504. Throughput: 0: 879.2. Samples: 237686. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
3416 |
+
[2023-07-04 15:47:03,449][18333] Avg episode reward: [(0, '24.067')]
|
3417 |
+
[2023-07-04 15:47:03,454][22113] Saving new best policy, reward=24.067!
|
3418 |
+
[2023-07-04 15:47:08,444][18333] Fps is (10 sec: 4096.1, 60 sec: 3549.9, 300 sec: 3375.7). Total num frames: 2985984. Throughput: 0: 878.6. Samples: 244308. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
3419 |
+
[2023-07-04 15:47:08,453][18333] Avg episode reward: [(0, '23.020')]
|
3420 |
+
[2023-07-04 15:47:08,772][22126] Updated weights for policy 0, policy_version 730 (0.0014)
|
3421 |
+
[2023-07-04 15:47:13,444][18333] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3374.0). Total num frames: 3002368. Throughput: 0: 878.3. Samples: 249102. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
3422 |
+
[2023-07-04 15:47:13,451][18333] Avg episode reward: [(0, '23.164')]
|
3423 |
+
[2023-07-04 15:47:18,444][18333] Fps is (10 sec: 2867.1, 60 sec: 3481.6, 300 sec: 3415.6). Total num frames: 3014656. Throughput: 0: 878.1. Samples: 251172. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
3424 |
+
[2023-07-04 15:47:18,447][18333] Avg episode reward: [(0, '23.238')]
|
3425 |
+
[2023-07-04 15:47:22,534][22126] Updated weights for policy 0, policy_version 740 (0.0022)
|
3426 |
+
[2023-07-04 15:47:23,443][18333] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 3485.1). Total num frames: 3035136. Throughput: 0: 878.0. Samples: 255792. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
3427 |
+
[2023-07-04 15:47:23,449][18333] Avg episode reward: [(0, '22.522')]
|
3428 |
+
[2023-07-04 15:47:28,444][18333] Fps is (10 sec: 4096.2, 60 sec: 3549.9, 300 sec: 3512.8). Total num frames: 3055616. Throughput: 0: 880.2. Samples: 262480. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
3429 |
+
[2023-07-04 15:47:28,446][18333] Avg episode reward: [(0, '23.105')]
|
3430 |
+
[2023-07-04 15:47:32,139][22126] Updated weights for policy 0, policy_version 750 (0.0012)
|
3431 |
+
[2023-07-04 15:47:33,446][18333] Fps is (10 sec: 3685.6, 60 sec: 3549.7, 300 sec: 3498.9). Total num frames: 3072000. Throughput: 0: 881.4. Samples: 265690. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
3432 |
+
[2023-07-04 15:47:33,454][18333] Avg episode reward: [(0, '22.716')]
|
3433 |
+
[2023-07-04 15:47:38,444][18333] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3499.0). Total num frames: 3088384. Throughput: 0: 881.5. Samples: 269878. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
3434 |
+
[2023-07-04 15:47:38,450][18333] Avg episode reward: [(0, '23.030')]
|
3435 |
+
[2023-07-04 15:47:43,443][18333] Fps is (10 sec: 3277.5, 60 sec: 3481.6, 300 sec: 3512.8). Total num frames: 3104768. Throughput: 0: 884.0. Samples: 274216. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
3436 |
+
[2023-07-04 15:47:43,451][18333] Avg episode reward: [(0, '22.901')]
|
3437 |
+
[2023-07-04 15:47:45,377][22126] Updated weights for policy 0, policy_version 760 (0.0024)
|
3438 |
+
[2023-07-04 15:47:48,444][18333] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3526.7). Total num frames: 3125248. Throughput: 0: 885.2. Samples: 277518. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
3439 |
+
[2023-07-04 15:47:48,451][18333] Avg episode reward: [(0, '22.048')]
|
3440 |
+
[2023-07-04 15:47:53,444][18333] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3526.7). Total num frames: 3145728. Throughput: 0: 885.5. Samples: 284154. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
3441 |
+
[2023-07-04 15:47:53,449][18333] Avg episode reward: [(0, '22.074')]
|
3442 |
+
[2023-07-04 15:47:55,522][22126] Updated weights for policy 0, policy_version 770 (0.0012)
|
3443 |
+
[2023-07-04 15:47:58,444][18333] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3499.0). Total num frames: 3158016. Throughput: 0: 882.9. Samples: 288832. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
3444 |
+
[2023-07-04 15:47:58,448][18333] Avg episode reward: [(0, '22.513')]
|
3445 |
+
[2023-07-04 15:47:58,459][22113] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000771_3158016.pth...
|
3446 |
+
[2023-07-04 15:47:58,641][22113] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000565_2314240.pth
|
3447 |
+
[2023-07-04 15:48:03,444][18333] Fps is (10 sec: 2867.0, 60 sec: 3481.6, 300 sec: 3498.9). Total num frames: 3174400. Throughput: 0: 880.9. Samples: 290812. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
3448 |
+
[2023-07-04 15:48:03,454][18333] Avg episode reward: [(0, '22.144')]
|
3449 |
+
[2023-07-04 15:48:08,381][22126] Updated weights for policy 0, policy_version 780 (0.0017)
|
3450 |
+
[2023-07-04 15:48:08,443][18333] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3526.7). Total num frames: 3194880. Throughput: 0: 881.0. Samples: 295436. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
3451 |
+
[2023-07-04 15:48:08,448][18333] Avg episode reward: [(0, '21.483')]
|
3452 |
+
[2023-07-04 15:48:13,444][18333] Fps is (10 sec: 4096.3, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 3215360. Throughput: 0: 884.0. Samples: 302262. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
3453 |
+
[2023-07-04 15:48:13,445][18333] Avg episode reward: [(0, '22.172')]
|
3454 |
+
[2023-07-04 15:48:18,444][18333] Fps is (10 sec: 3686.4, 60 sec: 3618.2, 300 sec: 3499.0). Total num frames: 3231744. Throughput: 0: 885.5. Samples: 305536. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
3455 |
+
[2023-07-04 15:48:18,446][18333] Avg episode reward: [(0, '23.374')]
|
3456 |
+
[2023-07-04 15:48:18,935][22126] Updated weights for policy 0, policy_version 790 (0.0021)
|
3457 |
+
[2023-07-04 15:48:23,443][18333] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3499.0). Total num frames: 3248128. Throughput: 0: 885.7. Samples: 309734. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
|
3458 |
+
[2023-07-04 15:48:23,446][18333] Avg episode reward: [(0, '23.333')]
|
3459 |
+
[2023-07-04 15:48:28,444][18333] Fps is (10 sec: 2867.2, 60 sec: 3413.3, 300 sec: 3499.0). Total num frames: 3260416. Throughput: 0: 884.2. Samples: 314006. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
3460 |
+
[2023-07-04 15:48:28,447][18333] Avg episode reward: [(0, '23.753')]
|
3461 |
+
[2023-07-04 15:48:31,688][22126] Updated weights for policy 0, policy_version 800 (0.0014)
|
3462 |
+
[2023-07-04 15:48:33,444][18333] Fps is (10 sec: 3276.8, 60 sec: 3481.7, 300 sec: 3512.8). Total num frames: 3280896. Throughput: 0: 876.0. Samples: 316940. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
3463 |
+
[2023-07-04 15:48:33,446][18333] Avg episode reward: [(0, '23.964')]
|
3464 |
+
[2023-07-04 15:48:38,444][18333] Fps is (10 sec: 4505.6, 60 sec: 3618.1, 300 sec: 3526.7). Total num frames: 3305472. Throughput: 0: 873.5. Samples: 323462. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
|
3465 |
+
[2023-07-04 15:48:38,446][18333] Avg episode reward: [(0, '24.770')]
|
3466 |
+
[2023-07-04 15:48:38,458][22113] Saving new best policy, reward=24.770!
|
3467 |
+
[2023-07-04 15:48:42,139][22126] Updated weights for policy 0, policy_version 810 (0.0017)
|
3468 |
+
[2023-07-04 15:48:43,444][18333] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3499.0). Total num frames: 3317760. Throughput: 0: 879.9. Samples: 328428. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
3469 |
+
[2023-07-04 15:48:43,446][18333] Avg episode reward: [(0, '24.421')]
|
3470 |
+
[2023-07-04 15:48:48,444][18333] Fps is (10 sec: 2867.1, 60 sec: 3481.6, 300 sec: 3512.8). Total num frames: 3334144. Throughput: 0: 882.4. Samples: 330520. Policy #0 lag: (min: 0.0, avg: 0.3, max: 2.0)
|
3471 |
+
[2023-07-04 15:48:48,449][18333] Avg episode reward: [(0, '23.703')]
|
3472 |
+
[2023-07-04 15:48:53,444][18333] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 3512.8). Total num frames: 3350528. Throughput: 0: 882.3. Samples: 335138. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-07-04 15:48:53,448][18333] Avg episode reward: [(0, '23.282')]
[2023-07-04 15:48:54,630][22126] Updated weights for policy 0, policy_version 820 (0.0031)
[2023-07-04 15:48:58,444][18333] Fps is (10 sec: 3686.5, 60 sec: 3549.9, 300 sec: 3512.8). Total num frames: 3371008. Throughput: 0: 876.4. Samples: 341698. Policy #0 lag: (min: 0.0, avg: 0.3, max: 2.0)
[2023-07-04 15:48:58,447][18333] Avg episode reward: [(0, '23.090')]
[2023-07-04 15:49:03,444][18333] Fps is (10 sec: 4096.0, 60 sec: 3618.2, 300 sec: 3512.8). Total num frames: 3391488. Throughput: 0: 879.3. Samples: 345104. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
[2023-07-04 15:49:03,448][18333] Avg episode reward: [(0, '22.572')]
[2023-07-04 15:49:05,498][22126] Updated weights for policy 0, policy_version 830 (0.0012)
[2023-07-04 15:49:08,448][18333] Fps is (10 sec: 3275.2, 60 sec: 3481.3, 300 sec: 3498.9). Total num frames: 3403776. Throughput: 0: 883.2. Samples: 349484. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
[2023-07-04 15:49:08,451][18333] Avg episode reward: [(0, '23.881')]
[2023-07-04 15:49:13,444][18333] Fps is (10 sec: 2867.1, 60 sec: 3413.3, 300 sec: 3512.8). Total num frames: 3420160. Throughput: 0: 883.4. Samples: 353758. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-07-04 15:49:13,447][18333] Avg episode reward: [(0, '23.167')]
[2023-07-04 15:49:17,533][22126] Updated weights for policy 0, policy_version 840 (0.0022)
[2023-07-04 15:49:18,444][18333] Fps is (10 sec: 3688.1, 60 sec: 3481.6, 300 sec: 3526.7). Total num frames: 3440640. Throughput: 0: 883.9. Samples: 356714. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
[2023-07-04 15:49:18,450][18333] Avg episode reward: [(0, '23.383')]
[2023-07-04 15:49:23,444][18333] Fps is (10 sec: 4505.9, 60 sec: 3618.1, 300 sec: 3540.6). Total num frames: 3465216. Throughput: 0: 889.1. Samples: 363470. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-07-04 15:49:23,450][18333] Avg episode reward: [(0, '22.627')]
[2023-07-04 15:49:28,447][18333] Fps is (10 sec: 3685.1, 60 sec: 3617.9, 300 sec: 3512.8). Total num frames: 3477504. Throughput: 0: 890.7. Samples: 368512. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-07-04 15:49:28,450][18333] Avg episode reward: [(0, '22.567')]
[2023-07-04 15:49:28,852][22126] Updated weights for policy 0, policy_version 850 (0.0016)
[2023-07-04 15:49:33,444][18333] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3512.9). Total num frames: 3493888. Throughput: 0: 889.2. Samples: 370536. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
[2023-07-04 15:49:33,449][18333] Avg episode reward: [(0, '21.095')]
[2023-07-04 15:49:38,444][18333] Fps is (10 sec: 3278.0, 60 sec: 3413.3, 300 sec: 3526.7). Total num frames: 3510272. Throughput: 0: 886.3. Samples: 375020. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-07-04 15:49:38,446][18333] Avg episode reward: [(0, '19.977')]
[2023-07-04 15:49:40,792][22126] Updated weights for policy 0, policy_version 860 (0.0012)
[2023-07-04 15:49:43,444][18333] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3512.8). Total num frames: 3530752. Throughput: 0: 888.2. Samples: 381666. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-07-04 15:49:43,448][18333] Avg episode reward: [(0, '21.107')]
[2023-07-04 15:49:48,444][18333] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3512.8). Total num frames: 3551232. Throughput: 0: 888.3. Samples: 385078. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-07-04 15:49:48,450][18333] Avg episode reward: [(0, '21.707')]
[2023-07-04 15:49:52,023][22126] Updated weights for policy 0, policy_version 870 (0.0027)
[2023-07-04 15:49:53,444][18333] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3512.8). Total num frames: 3563520. Throughput: 0: 889.1. Samples: 389490. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
[2023-07-04 15:49:53,449][18333] Avg episode reward: [(0, '22.408')]
[2023-07-04 15:49:58,444][18333] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3512.8). Total num frames: 3579904. Throughput: 0: 890.4. Samples: 393826. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-07-04 15:49:58,450][18333] Avg episode reward: [(0, '22.977')]
[2023-07-04 15:49:58,463][22113] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000874_3579904.pth...
[2023-07-04 15:49:58,653][22113] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000669_2740224.pth
[2023-07-04 15:50:03,444][18333] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3526.7). Total num frames: 3600384. Throughput: 0: 886.4. Samples: 396600. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
[2023-07-04 15:50:03,450][18333] Avg episode reward: [(0, '23.005')]
[2023-07-04 15:50:03,827][22126] Updated weights for policy 0, policy_version 880 (0.0014)
[2023-07-04 15:50:08,445][18333] Fps is (10 sec: 4095.3, 60 sec: 3618.3, 300 sec: 3526.7). Total num frames: 3620864. Throughput: 0: 881.2. Samples: 403124. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-07-04 15:50:08,448][18333] Avg episode reward: [(0, '24.137')]
[2023-07-04 15:50:13,444][18333] Fps is (10 sec: 3686.4, 60 sec: 3618.2, 300 sec: 3512.8). Total num frames: 3637248. Throughput: 0: 884.1. Samples: 408292. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-07-04 15:50:13,449][18333] Avg episode reward: [(0, '24.242')]
[2023-07-04 15:50:15,313][22126] Updated weights for policy 0, policy_version 890 (0.0015)
[2023-07-04 15:50:18,444][18333] Fps is (10 sec: 3277.4, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 3653632. Throughput: 0: 886.8. Samples: 410444. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
[2023-07-04 15:50:18,450][18333] Avg episode reward: [(0, '23.530')]
[2023-07-04 15:50:23,444][18333] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 3526.7). Total num frames: 3670016. Throughput: 0: 883.8. Samples: 414790. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
[2023-07-04 15:50:23,448][18333] Avg episode reward: [(0, '22.577')]
[2023-07-04 15:50:26,894][22126] Updated weights for policy 0, policy_version 900 (0.0023)
[2023-07-04 15:50:28,444][18333] Fps is (10 sec: 3686.4, 60 sec: 3550.1, 300 sec: 3526.7). Total num frames: 3690496. Throughput: 0: 880.9. Samples: 421306. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
[2023-07-04 15:50:28,446][18333] Avg episode reward: [(0, '23.107')]
[2023-07-04 15:50:33,444][18333] Fps is (10 sec: 4096.1, 60 sec: 3618.1, 300 sec: 3526.7). Total num frames: 3710976. Throughput: 0: 878.8. Samples: 424622. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
[2023-07-04 15:50:33,446][18333] Avg episode reward: [(0, '24.071')]
[2023-07-04 15:50:38,425][22126] Updated weights for policy 0, policy_version 910 (0.0012)
[2023-07-04 15:50:38,444][18333] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3526.8). Total num frames: 3727360. Throughput: 0: 882.0. Samples: 429182. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
[2023-07-04 15:50:38,445][18333] Avg episode reward: [(0, '23.988')]
[2023-07-04 15:50:43,445][18333] Fps is (10 sec: 2866.7, 60 sec: 3481.5, 300 sec: 3526.7). Total num frames: 3739648. Throughput: 0: 882.1. Samples: 433522. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
[2023-07-04 15:50:43,448][18333] Avg episode reward: [(0, '23.037')]
[2023-07-04 15:50:48,444][18333] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3540.6). Total num frames: 3760128. Throughput: 0: 879.2. Samples: 436166. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
[2023-07-04 15:50:48,451][18333] Avg episode reward: [(0, '24.210')]
[2023-07-04 15:50:49,985][22126] Updated weights for policy 0, policy_version 920 (0.0040)
[2023-07-04 15:50:53,444][18333] Fps is (10 sec: 4096.7, 60 sec: 3618.1, 300 sec: 3526.7). Total num frames: 3780608. Throughput: 0: 883.6. Samples: 442886. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-07-04 15:50:53,452][18333] Avg episode reward: [(0, '25.008')]
[2023-07-04 15:50:53,455][22113] Saving new best policy, reward=25.008!
[2023-07-04 15:50:58,444][18333] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3526.8). Total num frames: 3796992. Throughput: 0: 882.3. Samples: 447996. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
[2023-07-04 15:50:58,450][18333] Avg episode reward: [(0, '24.569')]
[2023-07-04 15:51:02,192][22126] Updated weights for policy 0, policy_version 930 (0.0018)
[2023-07-04 15:51:03,444][18333] Fps is (10 sec: 2867.0, 60 sec: 3481.6, 300 sec: 3512.8). Total num frames: 3809280. Throughput: 0: 880.2. Samples: 450052. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
[2023-07-04 15:51:03,449][18333] Avg episode reward: [(0, '24.889')]
[2023-07-04 15:51:08,444][18333] Fps is (10 sec: 3276.8, 60 sec: 3481.7, 300 sec: 3540.6). Total num frames: 3829760. Throughput: 0: 878.9. Samples: 454340. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
[2023-07-04 15:51:08,446][18333] Avg episode reward: [(0, '23.066')]
[2023-07-04 15:51:13,187][22126] Updated weights for policy 0, policy_version 940 (0.0021)
[2023-07-04 15:51:13,444][18333] Fps is (10 sec: 4096.3, 60 sec: 3549.9, 300 sec: 3540.6). Total num frames: 3850240. Throughput: 0: 883.0. Samples: 461040. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
[2023-07-04 15:51:13,446][18333] Avg episode reward: [(0, '21.575')]
[2023-07-04 15:51:18,444][18333] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3526.7). Total num frames: 3870720. Throughput: 0: 884.0. Samples: 464404. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
[2023-07-04 15:51:18,450][18333] Avg episode reward: [(0, '21.145')]
[2023-07-04 15:51:23,444][18333] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 3883008. Throughput: 0: 887.0. Samples: 469096. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
[2023-07-04 15:51:23,450][18333] Avg episode reward: [(0, '21.746')]
[2023-07-04 15:51:25,562][22126] Updated weights for policy 0, policy_version 950 (0.0019)
[2023-07-04 15:51:28,444][18333] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3526.7). Total num frames: 3899392. Throughput: 0: 883.7. Samples: 473286. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
[2023-07-04 15:51:28,445][18333] Avg episode reward: [(0, '20.778')]
[2023-07-04 15:51:33,443][18333] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3540.6). Total num frames: 3919872. Throughput: 0: 882.5. Samples: 475878. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
[2023-07-04 15:51:33,447][18333] Avg episode reward: [(0, '22.279')]
[2023-07-04 15:51:36,134][22126] Updated weights for policy 0, policy_version 960 (0.0030)
[2023-07-04 15:51:38,444][18333] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3540.6). Total num frames: 3940352. Throughput: 0: 883.6. Samples: 482650. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-07-04 15:51:38,446][18333] Avg episode reward: [(0, '22.723')]
[2023-07-04 15:51:43,444][18333] Fps is (10 sec: 3686.4, 60 sec: 3618.2, 300 sec: 3526.7). Total num frames: 3956736. Throughput: 0: 889.6. Samples: 488030. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
[2023-07-04 15:51:43,449][18333] Avg episode reward: [(0, '23.297')]
[2023-07-04 15:51:48,444][18333] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3526.7). Total num frames: 3969024. Throughput: 0: 890.3. Samples: 490116. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
[2023-07-04 15:51:48,450][18333] Avg episode reward: [(0, '24.658')]
[2023-07-04 15:51:48,604][22126] Updated weights for policy 0, policy_version 970 (0.0014)
[2023-07-04 15:51:53,444][18333] Fps is (10 sec: 3276.7, 60 sec: 3481.6, 300 sec: 3540.6). Total num frames: 3989504. Throughput: 0: 890.2. Samples: 494398. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
[2023-07-04 15:51:53,446][18333] Avg episode reward: [(0, '23.262')]
[2023-07-04 15:51:57,392][22113] Stopping Batcher_0...
[2023-07-04 15:51:57,394][22113] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
[2023-07-04 15:51:57,395][18333] Component Batcher_0 stopped!
[2023-07-04 15:51:57,395][22113] Loop batcher_evt_loop terminating...
[2023-07-04 15:51:57,459][18333] Component RolloutWorker_w0 stopped!
[2023-07-04 15:51:57,460][22138] Stopping RolloutWorker_w7...
[2023-07-04 15:51:57,465][18333] Component RolloutWorker_w7 stopped!
[2023-07-04 15:51:57,473][22138] Loop rollout_proc7_evt_loop terminating...
[2023-07-04 15:51:57,470][22130] Stopping RolloutWorker_w0...
[2023-07-04 15:51:57,479][18333] Component RolloutWorker_w2 stopped!
[2023-07-04 15:51:57,484][22134] Stopping RolloutWorker_w2...
[2023-07-04 15:51:57,478][22126] Weights refcount: 2 0
[2023-07-04 15:51:57,474][22130] Loop rollout_proc0_evt_loop terminating...
[2023-07-04 15:51:57,492][22136] Stopping RolloutWorker_w5...
[2023-07-04 15:51:57,491][18333] Component InferenceWorker_p0-w0 stopped!
[2023-07-04 15:51:57,496][22132] Stopping RolloutWorker_w3...
[2023-07-04 15:51:57,496][18333] Component RolloutWorker_w5 stopped!
[2023-07-04 15:51:57,500][18333] Component RolloutWorker_w3 stopped!
[2023-07-04 15:51:57,493][22136] Loop rollout_proc5_evt_loop terminating...
[2023-07-04 15:51:57,497][22132] Loop rollout_proc3_evt_loop terminating...
[2023-07-04 15:51:57,505][22126] Stopping InferenceWorker_p0-w0...
[2023-07-04 15:51:57,506][18333] Component RolloutWorker_w1 stopped!
[2023-07-04 15:51:57,506][22133] Stopping RolloutWorker_w1...
[2023-07-04 15:51:57,505][22126] Loop inference_proc0-0_evt_loop terminating...
[2023-07-04 15:51:57,485][22134] Loop rollout_proc2_evt_loop terminating...
[2023-07-04 15:51:57,510][22133] Loop rollout_proc1_evt_loop terminating...
[2023-07-04 15:51:57,520][18333] Component RolloutWorker_w4 stopped!
[2023-07-04 15:51:57,524][22135] Stopping RolloutWorker_w4...
[2023-07-04 15:51:57,525][18333] Component RolloutWorker_w6 stopped!
[2023-07-04 15:51:57,531][22137] Stopping RolloutWorker_w6...
[2023-07-04 15:51:57,538][22135] Loop rollout_proc4_evt_loop terminating...
[2023-07-04 15:51:57,532][22137] Loop rollout_proc6_evt_loop terminating...
[2023-07-04 15:51:57,552][22113] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000771_3158016.pth
[2023-07-04 15:51:57,564][22113] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
[2023-07-04 15:51:57,729][18333] Component LearnerWorker_p0 stopped!
[2023-07-04 15:51:57,736][18333] Waiting for process learner_proc0 to stop...
[2023-07-04 15:51:57,741][22113] Stopping LearnerWorker_p0...
[2023-07-04 15:51:57,741][22113] Loop learner_proc0_evt_loop terminating...
[2023-07-04 15:51:58,816][18333] Waiting for process inference_proc0-0 to join...
[2023-07-04 15:51:58,821][18333] Waiting for process rollout_proc0 to join...
[2023-07-04 15:52:00,036][18333] Waiting for process rollout_proc1 to join...
[2023-07-04 15:52:00,199][18333] Waiting for process rollout_proc2 to join...
[2023-07-04 15:52:00,201][18333] Waiting for process rollout_proc3 to join...
[2023-07-04 15:52:00,205][18333] Waiting for process rollout_proc4 to join...
[2023-07-04 15:52:00,208][18333] Waiting for process rollout_proc5 to join...
[2023-07-04 15:52:00,211][18333] Waiting for process rollout_proc6 to join...
[2023-07-04 15:52:00,215][18333] Waiting for process rollout_proc7 to join...
[2023-07-04 15:52:00,217][18333] Batcher 0 profile tree view:
batching: 13.2010, releasing_batches: 0.0137
[2023-07-04 15:52:00,228][18333] InferenceWorker_p0-w0 profile tree view:
wait_policy: 0.0055
wait_policy_total: 279.2133
update_model: 4.1925
weight_update: 0.0025
one_step: 0.0025
handle_policy_step: 276.3185
deserialize: 7.4829, stack: 1.5020, obs_to_device_normalize: 59.2136, forward: 138.0503, send_messages: 14.2878
prepare_outputs: 42.0752
to_cpu: 25.6465
[2023-07-04 15:52:00,230][18333] Learner 0 profile tree view:
misc: 0.0025, prepare_batch: 9.8375
train: 39.7930
epoch_init: 0.0146, minibatch_init: 0.0032, losses_postprocess: 0.2770, kl_divergence: 0.3191, after_optimizer: 1.9968
calculate_losses: 12.3821
losses_init: 0.0018, forward_head: 1.0349, bptt_initial: 7.7057, tail: 0.5452, advantages_returns: 0.1694, losses: 1.6271
bptt: 1.1473
bptt_forward_core: 1.0992
update: 24.4481
clip: 0.7666
[2023-07-04 15:52:00,231][18333] RolloutWorker_w0 profile tree view:
wait_for_trajectories: 0.2117, enqueue_policy_requests: 75.9103, env_step: 429.9611, overhead: 11.8503, complete_rollouts: 3.8320
save_policy_outputs: 10.6986
split_output_tensors: 5.2931
[2023-07-04 15:52:00,234][18333] RolloutWorker_w7 profile tree view:
wait_for_trajectories: 0.1973, enqueue_policy_requests: 80.5694, env_step: 427.3777, overhead: 11.8470, complete_rollouts: 3.6744
save_policy_outputs: 10.4136
split_output_tensors: 5.1270
[2023-07-04 15:52:00,236][18333] Loop Runner_EvtLoop terminating...
[2023-07-04 15:52:00,239][18333] Runner profile tree view:
main_loop: 600.1708
[2023-07-04 15:52:00,244][18333] Collected {0: 4005888}, FPS: 3330.5
[2023-07-04 15:52:13,757][18333] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
[2023-07-04 15:52:13,759][18333] Overriding arg 'num_workers' with value 1 passed from command line
[2023-07-04 15:52:13,761][18333] Adding new argument 'no_render'=True that is not in the saved config file!
[2023-07-04 15:52:13,763][18333] Adding new argument 'save_video'=True that is not in the saved config file!
[2023-07-04 15:52:13,764][18333] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
[2023-07-04 15:52:13,766][18333] Adding new argument 'video_name'=None that is not in the saved config file!
[2023-07-04 15:52:13,768][18333] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
[2023-07-04 15:52:13,770][18333] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
[2023-07-04 15:52:13,771][18333] Adding new argument 'push_to_hub'=True that is not in the saved config file!
[2023-07-04 15:52:13,772][18333] Adding new argument 'hf_repository'='HilbertS/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
[2023-07-04 15:52:13,773][18333] Adding new argument 'policy_index'=0 that is not in the saved config file!
[2023-07-04 15:52:13,775][18333] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
[2023-07-04 15:52:13,776][18333] Adding new argument 'train_script'=None that is not in the saved config file!
[2023-07-04 15:52:13,777][18333] Adding new argument 'enjoy_script'=None that is not in the saved config file!
[2023-07-04 15:52:13,778][18333] Using frameskip 1 and render_action_repeat=4 for evaluation
[2023-07-04 15:52:13,801][18333] RunningMeanStd input shape: (3, 72, 128)
[2023-07-04 15:52:13,803][18333] RunningMeanStd input shape: (1,)
[2023-07-04 15:52:13,817][18333] ConvEncoder: input_channels=3
[2023-07-04 15:52:13,852][18333] Conv encoder output size: 512
[2023-07-04 15:52:13,853][18333] Policy head output size: 512
[2023-07-04 15:52:13,874][18333] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
[2023-07-04 15:52:14,537][18333] Num frames 100...
[2023-07-04 15:52:14,657][18333] Num frames 200...
[2023-07-04 15:52:14,787][18333] Num frames 300...
[2023-07-04 15:52:14,937][18333] Num frames 400...
[2023-07-04 15:52:15,068][18333] Num frames 500...
[2023-07-04 15:52:15,218][18333] Num frames 600...
[2023-07-04 15:52:15,353][18333] Num frames 700...
[2023-07-04 15:52:15,493][18333] Num frames 800...
[2023-07-04 15:52:15,632][18333] Num frames 900...
[2023-07-04 15:52:15,754][18333] Num frames 1000...
[2023-07-04 15:52:15,938][18333] Num frames 1100...
[2023-07-04 15:52:16,144][18333] Num frames 1200...
[2023-07-04 15:52:16,329][18333] Num frames 1300...
[2023-07-04 15:52:16,515][18333] Num frames 1400...
[2023-07-04 15:52:16,694][18333] Num frames 1500...
[2023-07-04 15:52:16,885][18333] Num frames 1600...
[2023-07-04 15:52:17,064][18333] Num frames 1700...
[2023-07-04 15:52:17,250][18333] Num frames 1800...
[2023-07-04 15:52:17,430][18333] Num frames 1900...
[2023-07-04 15:52:17,609][18333] Num frames 2000...
[2023-07-04 15:52:17,785][18333] Num frames 2100...
[2023-07-04 15:52:17,840][18333] Avg episode rewards: #0: 54.999, true rewards: #0: 21.000
[2023-07-04 15:52:17,843][18333] Avg episode reward: 54.999, avg true_objective: 21.000
[2023-07-04 15:52:18,019][18333] Num frames 2200...
[2023-07-04 15:52:18,202][18333] Num frames 2300...
[2023-07-04 15:52:18,390][18333] Num frames 2400...
[2023-07-04 15:52:18,579][18333] Num frames 2500...
[2023-07-04 15:52:18,764][18333] Num frames 2600...
[2023-07-04 15:52:18,945][18333] Num frames 2700...
[2023-07-04 15:52:19,127][18333] Num frames 2800...
[2023-07-04 15:52:19,311][18333] Num frames 2900...
[2023-07-04 15:52:19,510][18333] Num frames 3000...
[2023-07-04 15:52:19,694][18333] Num frames 3100...
[2023-07-04 15:52:19,882][18333] Num frames 3200...
[2023-07-04 15:52:20,067][18333] Num frames 3300...
[2023-07-04 15:52:20,251][18333] Num frames 3400...
[2023-07-04 15:52:20,436][18333] Num frames 3500...
[2023-07-04 15:52:20,617][18333] Num frames 3600...
[2023-07-04 15:52:20,802][18333] Num frames 3700...
[2023-07-04 15:52:21,035][18333] Avg episode rewards: #0: 48.979, true rewards: #0: 18.980
[2023-07-04 15:52:21,037][18333] Avg episode reward: 48.979, avg true_objective: 18.980
[2023-07-04 15:52:21,047][18333] Num frames 3800...
[2023-07-04 15:52:21,222][18333] Num frames 3900...
[2023-07-04 15:52:21,353][18333] Num frames 4000...
[2023-07-04 15:52:21,484][18333] Avg episode rewards: #0: 33.506, true rewards: #0: 13.507
[2023-07-04 15:52:21,486][18333] Avg episode reward: 33.506, avg true_objective: 13.507
[2023-07-04 15:52:21,552][18333] Num frames 4100...
[2023-07-04 15:52:21,678][18333] Num frames 4200...
[2023-07-04 15:52:21,808][18333] Num frames 4300...
[2023-07-04 15:52:21,935][18333] Num frames 4400...
[2023-07-04 15:52:22,072][18333] Num frames 4500...
[2023-07-04 15:52:22,198][18333] Num frames 4600...
[2023-07-04 15:52:22,332][18333] Num frames 4700...
[2023-07-04 15:52:22,462][18333] Num frames 4800...
[2023-07-04 15:52:22,603][18333] Num frames 4900...
[2023-07-04 15:52:22,735][18333] Num frames 5000...
[2023-07-04 15:52:22,869][18333] Num frames 5100...
[2023-07-04 15:52:23,009][18333] Num frames 5200...
[2023-07-04 15:52:23,142][18333] Num frames 5300...
[2023-07-04 15:52:23,285][18333] Num frames 5400...
[2023-07-04 15:52:23,422][18333] Num frames 5500...
[2023-07-04 15:52:23,551][18333] Num frames 5600...
[2023-07-04 15:52:23,632][18333] Avg episode rewards: #0: 35.050, true rewards: #0: 14.050
[2023-07-04 15:52:23,634][18333] Avg episode reward: 35.050, avg true_objective: 14.050
[2023-07-04 15:52:23,740][18333] Num frames 5700...
[2023-07-04 15:52:23,866][18333] Num frames 5800...
[2023-07-04 15:52:23,992][18333] Num frames 5900...
[2023-07-04 15:52:24,140][18333] Avg episode rewards: #0: 28.944, true rewards: #0: 11.944
[2023-07-04 15:52:24,145][18333] Avg episode reward: 28.944, avg true_objective: 11.944
[2023-07-04 15:52:24,180][18333] Num frames 6000...
[2023-07-04 15:52:24,305][18333] Num frames 6100...
[2023-07-04 15:52:24,485][18333] Avg episode rewards: #0: 24.493, true rewards: #0: 10.327
[2023-07-04 15:52:24,487][18333] Avg episode reward: 24.493, avg true_objective: 10.327
[2023-07-04 15:52:24,497][18333] Num frames 6200...
[2023-07-04 15:52:24,632][18333] Num frames 6300...
[2023-07-04 15:52:24,754][18333] Num frames 6400...
[2023-07-04 15:52:24,880][18333] Num frames 6500...
[2023-07-04 15:52:24,999][18333] Num frames 6600...
[2023-07-04 15:52:25,128][18333] Num frames 6700...
[2023-07-04 15:52:25,257][18333] Num frames 6800...
[2023-07-04 15:52:25,402][18333] Num frames 6900...
[2023-07-04 15:52:25,545][18333] Num frames 7000...
[2023-07-04 15:52:25,669][18333] Num frames 7100...
[2023-07-04 15:52:25,796][18333] Num frames 7200...
[2023-07-04 15:52:25,922][18333] Num frames 7300...
[2023-07-04 15:52:26,053][18333] Num frames 7400...
[2023-07-04 15:52:26,199][18333] Num frames 7500...
[2023-07-04 15:52:26,327][18333] Num frames 7600...
[2023-07-04 15:52:26,475][18333] Avg episode rewards: #0: 26.954, true rewards: #0: 10.954
[2023-07-04 15:52:26,478][18333] Avg episode reward: 26.954, avg true_objective: 10.954
[2023-07-04 15:52:26,517][18333] Num frames 7700...
[2023-07-04 15:52:26,643][18333] Num frames 7800...
[2023-07-04 15:52:26,774][18333] Num frames 7900...
[2023-07-04 15:52:26,895][18333] Num frames 8000...
[2023-07-04 15:52:27,014][18333] Num frames 8100...
[2023-07-04 15:52:27,138][18333] Num frames 8200...
[2023-07-04 15:52:27,258][18333] Num frames 8300...
[2023-07-04 15:52:27,380][18333] Num frames 8400...
[2023-07-04 15:52:27,517][18333] Num frames 8500...
[2023-07-04 15:52:27,656][18333] Avg episode rewards: #0: 25.955, true rewards: #0: 10.705
[2023-07-04 15:52:27,658][18333] Avg episode reward: 25.955, avg true_objective: 10.705
[2023-07-04 15:52:27,703][18333] Num frames 8600...
[2023-07-04 15:52:27,870][18333] Avg episode rewards: #0: 23.213, true rewards: #0: 9.658
[2023-07-04 15:52:27,872][18333] Avg episode reward: 23.213, avg true_objective: 9.658
[2023-07-04 15:52:27,885][18333] Num frames 8700...
[2023-07-04 15:52:28,010][18333] Num frames 8800...
[2023-07-04 15:52:28,130][18333] Num frames 8900...
[2023-07-04 15:52:28,258][18333] Num frames 9000...
[2023-07-04 15:52:28,381][18333] Num frames 9100...
[2023-07-04 15:52:28,518][18333] Num frames 9200...
[2023-07-04 15:52:28,579][18333] Avg episode rewards: #0: 21.704, true rewards: #0: 9.204
[2023-07-04 15:52:28,581][18333] Avg episode reward: 21.704, avg true_objective: 9.204
[2023-07-04 15:53:26,952][18333] Replay video saved to /content/train_dir/default_experiment/replay.mp4!