Upload folder using huggingface_hub
.summary/0/events.out.tfevents.1725611727.4ed841473a2d
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51b33602fc2d9b7793fc2a9349e7f3a559aa224a0cfa21bf34b85c1864d333d1
+size 201167
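Each binary artifact in this commit (TensorBoard event file, checkpoints, replay video) is stored with Git LFS, so the repository itself only versions a three-line pointer: the LFS spec version, the blob's SHA-256, and its size in bytes. Below is a minimal sketch of parsing such a pointer and checking a downloaded blob against it; the helper names are illustrative, not part of git-lfs itself:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(text: str) -> dict:
    """Split a Git LFS pointer file into its three key/value fields."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

def verify_blob(pointer: dict, blob_path: Path) -> bool:
    """Check a downloaded blob against the pointer's size and SHA-256."""
    data = blob_path.read_bytes()
    return (len(data) == pointer["size"]
            and hashlib.sha256(data).hexdigest() == pointer["sha256"])

pointer_text = """\
version https://git-lfs.github.com/spec/v1
oid sha256:51b33602fc2d9b7793fc2a9349e7f3a559aa224a0cfa21bf34b85c1864d333d1
size 201167
"""
print(parse_lfs_pointer(pointer_text)["size"])  # 201167
```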
README.md
CHANGED
@@ -15,7 +15,7 @@ model-index:
       type: doom_health_gathering_supreme
     metrics:
     - type: mean_reward
-      value:
+      value: 11.86 +/- 6.87
       name: mean_reward
       verified: false
 ---
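The updated `value` is the mean and standard deviation of the per-episode rewards from the evaluation run (max_num_episodes=10, per the log below). A sketch of how such a `mean +/- std` string can be produced; the `episode_rewards` values here are made up for illustration:

```python
import statistics

def format_mean_reward(episode_rewards: list[float]) -> str:
    """Render episode rewards as the 'mean +/- std' string used in the model card."""
    mean = statistics.mean(episode_rewards)
    std = statistics.pstdev(episode_rewards)  # std over the evaluation episodes
    return f"{mean:.2f} +/- {std:.2f}"

# Hypothetical per-episode rewards; the real eval episodes appear in sf_log.txt below.
print(format_mean_reward([9.3, 21.5, 4.8, 12.0, 11.7, 15.2, 6.9, 13.4, 8.1, 17.6]))
```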
checkpoint_p0/best_000001219_4993024_reward_26.655.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a9876417ead596340c06307b52e741952dd2440efed40c0c06943e24c4e79fb
+size 34929243
checkpoint_p0/checkpoint_000001172_4800512.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:335c248786681602b347c6fa27d673a423f57e3d12e8e07313cafd8d14757367
+size 34929669
checkpoint_p0/checkpoint_000001222_5005312.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:515fd779429a9ca0c2a849321843ca16dcb6030f9fc11e272995e1ee79f95d34
+size 34929669
config.json
CHANGED
@@ -65,7 +65,7 @@
     "summaries_use_frameskip": true,
     "heartbeat_interval": 20,
     "heartbeat_reporting_interval": 600,
-    "train_for_env_steps": 4000000,
+    "train_for_env_steps": 5000000,
     "train_for_seconds": 10000000000,
     "save_every_sec": 120,
     "keep_checkpoints": 2,
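This is the diff that the training log below records as "Overriding arg 'train_for_env_steps' with value 5000000 passed from command line" followed by "Saving configuration to /content/train_dir/default_experiment/config.json...": on resume, the saved config is loaded, command-line overrides are applied, and the merged config is written back. A minimal sketch of that load/override/save cycle using plain `json` (not Sample Factory's actual config code):

```python
import json
from pathlib import Path

config_path = Path("/content/train_dir/default_experiment/config.json")

cfg = json.loads(config_path.read_text())          # load the saved experiment config
cfg["train_for_env_steps"] = 5_000_000             # apply the command-line override
config_path.write_text(json.dumps(cfg, indent=4))  # write the merged config back
```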
replay.mp4
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c49871602fc5a116f18f6ade3e48d8382390811cf358f940d7063b0c919ff633
+size 22845820
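Because these files are LFS pointers, the actual blobs are fetched from the Hub rather than from git history. A sketch of downloading the replay and the final checkpoint from this repo with `huggingface_hub` (the repo id appears in the log below):

```python
from huggingface_hub import hf_hub_download

repo_id = "Re-Re/rl_course_vizdoom_health_gathering_supreme"

# Resolves the LFS pointers and downloads the actual blobs into the local cache.
replay_path = hf_hub_download(repo_id=repo_id, filename="replay.mp4")
ckpt_path = hf_hub_download(
    repo_id=repo_id,
    filename="checkpoint_p0/checkpoint_000001222_5005312.pth",
)
print(replay_path, ckpt_path)
```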
sf_log.txt
CHANGED
@@ -1058,3 +1058,847 @@ main_loop: 1081.6875
 [2024-09-06 08:29:55,437][01070] Avg episode rewards: #0: 13.847, true rewards: #0: 7.247
 [2024-09-06 08:29:55,439][01070] Avg episode reward: 13.847, avg true_objective: 7.247
 [2024-09-06 08:30:39,348][01070] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
+[2024-09-06 08:30:46,004][01070] The model has been pushed to https://huggingface.co/Re-Re/rl_course_vizdoom_health_gathering_supreme
+[2024-09-06 08:35:27,232][01070] Environment doom_basic already registered, overwriting...
+[2024-09-06 08:35:27,234][01070] Environment doom_two_colors_easy already registered, overwriting...
+[2024-09-06 08:35:27,237][01070] Environment doom_two_colors_hard already registered, overwriting...
+[2024-09-06 08:35:27,238][01070] Environment doom_dm already registered, overwriting...
+[2024-09-06 08:35:27,240][01070] Environment doom_dwango5 already registered, overwriting...
+[2024-09-06 08:35:27,241][01070] Environment doom_my_way_home_flat_actions already registered, overwriting...
+[2024-09-06 08:35:27,242][01070] Environment doom_defend_the_center_flat_actions already registered, overwriting...
+[2024-09-06 08:35:27,243][01070] Environment doom_my_way_home already registered, overwriting...
+[2024-09-06 08:35:27,244][01070] Environment doom_deadly_corridor already registered, overwriting...
+[2024-09-06 08:35:27,245][01070] Environment doom_defend_the_center already registered, overwriting...
+[2024-09-06 08:35:27,246][01070] Environment doom_defend_the_line already registered, overwriting...
+[2024-09-06 08:35:27,247][01070] Environment doom_health_gathering already registered, overwriting...
+[2024-09-06 08:35:27,248][01070] Environment doom_health_gathering_supreme already registered, overwriting...
+[2024-09-06 08:35:27,250][01070] Environment doom_battle already registered, overwriting...
+[2024-09-06 08:35:27,251][01070] Environment doom_battle2 already registered, overwriting...
+[2024-09-06 08:35:27,252][01070] Environment doom_duel_bots already registered, overwriting...
+[2024-09-06 08:35:27,253][01070] Environment doom_deathmatch_bots already registered, overwriting...
+[2024-09-06 08:35:27,254][01070] Environment doom_duel already registered, overwriting...
+[2024-09-06 08:35:27,255][01070] Environment doom_deathmatch_full already registered, overwriting...
+[2024-09-06 08:35:27,256][01070] Environment doom_benchmark already registered, overwriting...
+[2024-09-06 08:35:27,257][01070] register_encoder_factory: <function make_vizdoom_encoder at 0x78dc5537e170>
+[2024-09-06 08:35:27,282][01070] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2024-09-06 08:35:27,284][01070] Overriding arg 'train_for_env_steps' with value 5000000 passed from command line
+[2024-09-06 08:35:27,291][01070] Experiment dir /content/train_dir/default_experiment already exists!
+[2024-09-06 08:35:27,293][01070] Resuming existing experiment from /content/train_dir/default_experiment...
+[2024-09-06 08:35:27,295][01070] Weights and Biases integration disabled
+[2024-09-06 08:35:27,298][01070] Environment var CUDA_VISIBLE_DEVICES is 0
+
+[2024-09-06 08:35:29,357][01070] Starting experiment with the following configuration:
+help=False
+algo=APPO
+env=doom_health_gathering_supreme
+experiment=default_experiment
+train_dir=/content/train_dir
+restart_behavior=resume
+device=gpu
+seed=None
+num_policies=1
+async_rl=True
+serial_mode=False
+batched_sampling=False
+num_batches_to_accumulate=2
+worker_num_splits=2
+policy_workers_per_policy=1
+max_policy_lag=1000
+num_workers=8
+num_envs_per_worker=4
+batch_size=1024
+num_batches_per_epoch=1
+num_epochs=1
+rollout=32
+recurrence=32
+shuffle_minibatches=False
+gamma=0.99
+reward_scale=1.0
+reward_clip=1000.0
+value_bootstrap=False
+normalize_returns=True
+exploration_loss_coeff=0.001
+value_loss_coeff=0.5
+kl_loss_coeff=0.0
+exploration_loss=symmetric_kl
+gae_lambda=0.95
+ppo_clip_ratio=0.1
+ppo_clip_value=0.2
+with_vtrace=False
+vtrace_rho=1.0
+vtrace_c=1.0
+optimizer=adam
+adam_eps=1e-06
+adam_beta1=0.9
+adam_beta2=0.999
+max_grad_norm=4.0
+learning_rate=0.0001
+lr_schedule=constant
+lr_schedule_kl_threshold=0.008
+lr_adaptive_min=1e-06
+lr_adaptive_max=0.01
+obs_subtract_mean=0.0
+obs_scale=255.0
+normalize_input=True
+normalize_input_keys=None
+decorrelate_experience_max_seconds=0
+decorrelate_envs_on_one_worker=True
+actor_worker_gpus=[]
+set_workers_cpu_affinity=True
+force_envs_single_thread=False
+default_niceness=0
+log_to_file=True
+experiment_summaries_interval=10
+flush_summaries_interval=30
+stats_avg=100
+summaries_use_frameskip=True
+heartbeat_interval=20
+heartbeat_reporting_interval=600
+train_for_env_steps=5000000
+train_for_seconds=10000000000
+save_every_sec=120
+keep_checkpoints=2
+load_checkpoint_kind=latest
+save_milestones_sec=-1
+save_best_every_sec=5
+save_best_metric=reward
+save_best_after=100000
+benchmark=False
+encoder_mlp_layers=[512, 512]
+encoder_conv_architecture=convnet_simple
+encoder_conv_mlp_layers=[512]
+use_rnn=True
+rnn_size=512
+rnn_type=gru
+rnn_num_layers=1
+decoder_mlp_layers=[]
+nonlinearity=elu
+policy_initialization=orthogonal
+policy_init_gain=1.0
+actor_critic_share_weights=True
+adaptive_stddev=True
+continuous_tanh_scale=0.0
+initial_stddev=1.0
+use_env_info_cache=False
+env_gpu_actions=False
+env_gpu_observations=True
+env_frameskip=4
+env_framestack=1
+pixel_format=CHW
+use_record_episode_statistics=False
+with_wandb=False
+wandb_user=None
+wandb_project=sample_factory
+wandb_group=None
+wandb_job_type=SF
+wandb_tags=[]
+with_pbt=False
+pbt_mix_policies_in_one_env=True
+pbt_period_env_steps=5000000
+pbt_start_mutation=20000000
+pbt_replace_fraction=0.3
+pbt_mutation_rate=0.15
+pbt_replace_reward_gap=0.1
+pbt_replace_reward_gap_absolute=1e-06
+pbt_optimize_gamma=False
+pbt_target_objective=true_objective
+pbt_perturb_min=1.1
+pbt_perturb_max=1.5
+num_agents=-1
+num_humans=0
+num_bots=-1
+start_bot_difficulty=None
+timelimit=None
+res_w=128
+res_h=72
+wide_aspect_ratio=False
+eval_env_frameskip=1
+fps=35
+command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000
+cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 4000000}
+git_hash=unknown
+git_repo_name=not a git repository
+[2024-09-06 08:35:29,359][01070] Saving configuration to /content/train_dir/default_experiment/config.json...
+[2024-09-06 08:35:29,362][01070] Rollout worker 0 uses device cpu
+[2024-09-06 08:35:29,364][01070] Rollout worker 1 uses device cpu
+[2024-09-06 08:35:29,365][01070] Rollout worker 2 uses device cpu
+[2024-09-06 08:35:29,366][01070] Rollout worker 3 uses device cpu
+[2024-09-06 08:35:29,368][01070] Rollout worker 4 uses device cpu
+[2024-09-06 08:35:29,369][01070] Rollout worker 5 uses device cpu
+[2024-09-06 08:35:29,370][01070] Rollout worker 6 uses device cpu
+[2024-09-06 08:35:29,372][01070] Rollout worker 7 uses device cpu
+[2024-09-06 08:35:29,446][01070] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-09-06 08:35:29,447][01070] InferenceWorker_p0-w0: min num requests: 2
+[2024-09-06 08:35:29,485][01070] Starting all processes...
+[2024-09-06 08:35:29,486][01070] Starting process learner_proc0
+[2024-09-06 08:35:29,535][01070] Starting all processes...
+[2024-09-06 08:35:29,540][01070] Starting process inference_proc0-0
+[2024-09-06 08:35:29,541][01070] Starting process rollout_proc0
+[2024-09-06 08:35:29,541][01070] Starting process rollout_proc1
+[2024-09-06 08:35:29,541][01070] Starting process rollout_proc2
+[2024-09-06 08:35:29,541][01070] Starting process rollout_proc3
+[2024-09-06 08:35:29,541][01070] Starting process rollout_proc4
+[2024-09-06 08:35:29,541][01070] Starting process rollout_proc5
+[2024-09-06 08:35:29,736][01070] Starting process rollout_proc7
+[2024-09-06 08:35:29,753][01070] Starting process rollout_proc6
+[2024-09-06 08:35:43,326][19093] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-09-06 08:35:43,327][19093] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+[2024-09-06 08:35:43,391][19093] Num visible devices: 1
+[2024-09-06 08:35:43,432][19093] Starting seed is not provided
+[2024-09-06 08:35:43,433][19093] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-09-06 08:35:43,434][19093] Initializing actor-critic model on device cuda:0
+[2024-09-06 08:35:43,434][19093] RunningMeanStd input shape: (3, 72, 128)
+[2024-09-06 08:35:43,436][19093] RunningMeanStd input shape: (1,)
+[2024-09-06 08:35:43,522][19093] ConvEncoder: input_channels=3
+[2024-09-06 08:35:44,545][19093] Conv encoder output size: 512
+[2024-09-06 08:35:44,548][19093] Policy head output size: 512
+[2024-09-06 08:35:44,679][19093] Created Actor Critic model with architecture:
+[2024-09-06 08:35:44,680][19093] ActorCriticSharedWeights(
+  (obs_normalizer): ObservationNormalizer(
+    (running_mean_std): RunningMeanStdDictInPlace(
+      (running_mean_std): ModuleDict(
+        (obs): RunningMeanStdInPlace()
+      )
+    )
+  )
+  (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+  (encoder): VizdoomEncoder(
+    (basic_encoder): ConvEncoder(
+      (enc): RecursiveScriptModule(
+        original_name=ConvEncoderImpl
+        (conv_head): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Conv2d)
+          (1): RecursiveScriptModule(original_name=ELU)
+          (2): RecursiveScriptModule(original_name=Conv2d)
+          (3): RecursiveScriptModule(original_name=ELU)
+          (4): RecursiveScriptModule(original_name=Conv2d)
+          (5): RecursiveScriptModule(original_name=ELU)
+        )
+        (mlp_layers): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Linear)
+          (1): RecursiveScriptModule(original_name=ELU)
+        )
+      )
+    )
+  )
+  (core): ModelCoreRNN(
+    (core): GRU(512, 512)
+  )
+  (decoder): MlpDecoder(
+    (mlp): Identity()
+  )
+  (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+  (action_parameterization): ActionParameterizationDefault(
+    (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+  )
+)
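The sizes logged above ("Conv encoder output size: 512", the Linear(512 -> 5) action head) follow from the 3x72x128 observation (res_w=128, res_h=72, pixel_format=CHW) and encoder_conv_architecture=convnet_simple. A sketch reproducing the shape arithmetic in plain PyTorch; the filter spec (32 8x8 stride 4, 64 4x4 stride 2, 128 3x3 stride 2) is an assumption based on the usual Atari-style convnet_simple layout, not something read from this log:

```python
import torch
import torch.nn as nn

# Assumed convnet_simple filters: (out_channels, kernel_size, stride) per layer.
conv_spec = [(32, 8, 4), (64, 4, 2), (128, 3, 2)]

layers, in_ch = [], 3
for out_ch, kernel, stride in conv_spec:
    layers += [nn.Conv2d(in_ch, out_ch, kernel, stride), nn.ELU()]
    in_ch = out_ch
conv_head = nn.Sequential(*layers)

obs = torch.zeros(1, 3, 72, 128)      # one CHW observation
flat = conv_head(obs).flatten(1)
print(flat.shape[1])                  # 2304 (= 128 * 3 * 6)

# The encoder MLP (encoder_conv_mlp_layers=[512]) maps this to the logged 512,
# which is also the GRU core size and the input width of both output heads.
mlp = nn.Sequential(nn.Linear(flat.shape[1], 512), nn.ELU())
print(mlp(flat).shape)                # torch.Size([1, 512])
```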
+[2024-09-06 08:35:45,399][19110] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-09-06 08:35:45,404][19110] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+[2024-09-06 08:35:45,627][19116] Worker 5 uses CPU cores [1]
+[2024-09-06 08:35:45,628][19113] Worker 2 uses CPU cores [0]
+[2024-09-06 08:35:45,638][19110] Num visible devices: 1
+[2024-09-06 08:35:45,693][19114] Worker 4 uses CPU cores [0]
+[2024-09-06 08:35:45,750][19093] Using optimizer <class 'torch.optim.adam.Adam'>
+[2024-09-06 08:35:45,925][19111] Worker 1 uses CPU cores [1]
+[2024-09-06 08:35:45,964][19117] Worker 7 uses CPU cores [1]
+[2024-09-06 08:35:46,011][19118] Worker 6 uses CPU cores [0]
+[2024-09-06 08:35:46,116][19112] Worker 0 uses CPU cores [0]
+[2024-09-06 08:35:46,119][19115] Worker 3 uses CPU cores [1]
+[2024-09-06 08:35:46,846][19093] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+[2024-09-06 08:35:46,893][19093] Loading model from checkpoint
+[2024-09-06 08:35:46,895][19093] Loaded experiment state at self.train_step=978, self.env_steps=4005888
+[2024-09-06 08:35:46,896][19093] Initialized policy 0 weights for model version 978
+[2024-09-06 08:35:46,906][19093] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-09-06 08:35:46,914][19093] LearnerWorker_p0 finished initialization!
+[2024-09-06 08:35:47,083][19110] RunningMeanStd input shape: (3, 72, 128)
+[2024-09-06 08:35:47,085][19110] RunningMeanStd input shape: (1,)
+[2024-09-06 08:35:47,103][19110] ConvEncoder: input_channels=3
+[2024-09-06 08:35:47,256][19110] Conv encoder output size: 512
+[2024-09-06 08:35:47,257][19110] Policy head output size: 512
+[2024-09-06 08:35:47,299][01070] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 4005888. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2024-09-06 08:35:47,343][01070] Inference worker 0-0 is ready!
+[2024-09-06 08:35:47,345][01070] All inference workers are ready! Signal rollout workers to start!
+[2024-09-06 08:35:47,666][19115] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-09-06 08:35:47,759][19117] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-09-06 08:35:47,841][19111] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-09-06 08:35:47,847][19116] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-09-06 08:35:47,859][19118] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-09-06 08:35:47,902][19114] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-09-06 08:35:47,911][19113] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-09-06 08:35:47,923][19112] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-09-06 08:35:49,437][01070] Heartbeat connected on Batcher_0
+[2024-09-06 08:35:49,445][01070] Heartbeat connected on LearnerWorker_p0
+[2024-09-06 08:35:49,476][01070] Heartbeat connected on InferenceWorker_p0-w0
+[2024-09-06 08:35:49,689][19112] Decorrelating experience for 0 frames...
+[2024-09-06 08:35:49,691][19113] Decorrelating experience for 0 frames...
+[2024-09-06 08:35:49,916][19115] Decorrelating experience for 0 frames...
+[2024-09-06 08:35:49,947][19117] Decorrelating experience for 0 frames...
+[2024-09-06 08:35:50,051][19111] Decorrelating experience for 0 frames...
+[2024-09-06 08:35:50,057][19116] Decorrelating experience for 0 frames...
+[2024-09-06 08:35:50,424][19112] Decorrelating experience for 32 frames...
+[2024-09-06 08:35:51,332][19117] Decorrelating experience for 32 frames...
+[2024-09-06 08:35:51,378][19115] Decorrelating experience for 32 frames...
+[2024-09-06 08:35:51,423][19111] Decorrelating experience for 32 frames...
+[2024-09-06 08:35:51,456][19114] Decorrelating experience for 0 frames...
+[2024-09-06 08:35:51,798][19113] Decorrelating experience for 32 frames...
+[2024-09-06 08:35:52,103][19112] Decorrelating experience for 64 frames...
+[2024-09-06 08:35:52,299][01070] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4005888. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2024-09-06 08:35:52,385][19118] Decorrelating experience for 0 frames...
+[2024-09-06 08:35:52,773][19116] Decorrelating experience for 32 frames...
+[2024-09-06 08:35:53,059][19117] Decorrelating experience for 64 frames...
+[2024-09-06 08:35:53,088][19115] Decorrelating experience for 64 frames...
+[2024-09-06 08:35:53,163][19111] Decorrelating experience for 64 frames...
+[2024-09-06 08:35:53,227][19114] Decorrelating experience for 32 frames...
+[2024-09-06 08:35:54,004][19116] Decorrelating experience for 64 frames...
+[2024-09-06 08:35:54,080][19111] Decorrelating experience for 96 frames...
+[2024-09-06 08:35:54,169][01070] Heartbeat connected on RolloutWorker_w1
+[2024-09-06 08:35:54,232][19113] Decorrelating experience for 64 frames...
+[2024-09-06 08:35:54,438][19118] Decorrelating experience for 32 frames...
+[2024-09-06 08:35:54,884][19114] Decorrelating experience for 64 frames...
+[2024-09-06 08:35:55,752][19112] Decorrelating experience for 96 frames...
+[2024-09-06 08:35:55,972][01070] Heartbeat connected on RolloutWorker_w0
+[2024-09-06 08:35:56,002][19113] Decorrelating experience for 96 frames...
+[2024-09-06 08:35:56,247][01070] Heartbeat connected on RolloutWorker_w2
+[2024-09-06 08:35:56,489][19118] Decorrelating experience for 64 frames...
+[2024-09-06 08:35:56,773][19114] Decorrelating experience for 96 frames...
+[2024-09-06 08:35:57,299][01070] Heartbeat connected on RolloutWorker_w4
+[2024-09-06 08:35:57,303][01070] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4005888. Throughput: 0: 71.2. Samples: 712. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2024-09-06 08:35:57,307][01070] Avg episode reward: [(0, '5.702')]
+[2024-09-06 08:35:57,398][19116] Decorrelating experience for 96 frames...
+[2024-09-06 08:35:57,706][01070] Heartbeat connected on RolloutWorker_w5
+[2024-09-06 08:35:57,841][19117] Decorrelating experience for 96 frames...
+[2024-09-06 08:35:58,244][01070] Heartbeat connected on RolloutWorker_w7
+[2024-09-06 08:36:00,233][19093] Signal inference workers to stop experience collection...
+[2024-09-06 08:36:00,248][19110] InferenceWorker_p0-w0: stopping experience collection
+[2024-09-06 08:36:00,307][19118] Decorrelating experience for 96 frames...
+[2024-09-06 08:36:00,409][01070] Heartbeat connected on RolloutWorker_w6
+[2024-09-06 08:36:00,811][19115] Decorrelating experience for 96 frames...
+[2024-09-06 08:36:00,904][01070] Heartbeat connected on RolloutWorker_w3
+[2024-09-06 08:36:02,107][19093] Signal inference workers to resume experience collection...
+[2024-09-06 08:36:02,108][19110] InferenceWorker_p0-w0: resuming experience collection
+[2024-09-06 08:36:02,300][01070] Fps is (10 sec: 409.6, 60 sec: 273.0, 300 sec: 273.0). Total num frames: 4009984. Throughput: 0: 147.9. Samples: 2218. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
+[2024-09-06 08:36:02,302][01070] Avg episode reward: [(0, '4.701')]
+[2024-09-06 08:36:07,302][01070] Fps is (10 sec: 1638.4, 60 sec: 819.0, 300 sec: 819.0). Total num frames: 4022272. Throughput: 0: 228.8. Samples: 4576. Policy #0 lag: (min: 0.0, avg: 0.9, max: 2.0)
+[2024-09-06 08:36:07,305][01070] Avg episode reward: [(0, '6.521')]
+[2024-09-06 08:36:12,073][19110] Updated weights for policy 0, policy_version 988 (0.0020)
+[2024-09-06 08:36:12,300][01070] Fps is (10 sec: 3686.1, 60 sec: 1638.3, 300 sec: 1638.3). Total num frames: 4046848. Throughput: 0: 391.6. Samples: 9790. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-09-06 08:36:12,306][01070] Avg episode reward: [(0, '11.236')]
+[2024-09-06 08:36:17,299][01070] Fps is (10 sec: 4507.3, 60 sec: 2048.0, 300 sec: 2048.0). Total num frames: 4067328. Throughput: 0: 445.7. Samples: 13370. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2024-09-06 08:36:17,305][01070] Avg episode reward: [(0, '12.296')]
+[2024-09-06 08:36:22,299][01070] Fps is (10 sec: 3687.1, 60 sec: 2223.5, 300 sec: 2223.5). Total num frames: 4083712. Throughput: 0: 556.9. Samples: 19492. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-09-06 08:36:22,301][01070] Avg episode reward: [(0, '14.420')]
+[2024-09-06 08:36:22,439][19110] Updated weights for policy 0, policy_version 998 (0.0030)
+[2024-09-06 08:36:27,299][01070] Fps is (10 sec: 3686.3, 60 sec: 2457.6, 300 sec: 2457.6). Total num frames: 4104192. Throughput: 0: 602.8. Samples: 24114. Policy #0 lag: (min: 0.0, avg: 0.3, max: 2.0)
+[2024-09-06 08:36:27,301][01070] Avg episode reward: [(0, '16.055')]
+[2024-09-06 08:36:32,188][19110] Updated weights for policy 0, policy_version 1008 (0.0021)
+[2024-09-06 08:36:32,299][01070] Fps is (10 sec: 4505.6, 60 sec: 2730.7, 300 sec: 2730.7). Total num frames: 4128768. Throughput: 0: 615.9. Samples: 27716. Policy #0 lag: (min: 0.0, avg: 0.3, max: 2.0)
+[2024-09-06 08:36:32,305][01070] Avg episode reward: [(0, '18.102')]
+[2024-09-06 08:36:32,309][19093] Saving new best policy, reward=18.102!
+[2024-09-06 08:36:37,300][01070] Fps is (10 sec: 4505.2, 60 sec: 2867.1, 300 sec: 2867.1). Total num frames: 4149248. Throughput: 0: 773.5. Samples: 34808. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-09-06 08:36:37,305][01070] Avg episode reward: [(0, '19.063')]
+[2024-09-06 08:36:37,312][19093] Saving new best policy, reward=19.063!
+[2024-09-06 08:36:42,299][01070] Fps is (10 sec: 3276.7, 60 sec: 2829.9, 300 sec: 2829.9). Total num frames: 4161536. Throughput: 0: 852.4. Samples: 39066. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-09-06 08:36:42,303][01070] Avg episode reward: [(0, '20.446')]
+[2024-09-06 08:36:42,306][19093] Saving new best policy, reward=20.446!
+[2024-09-06 08:36:43,965][19110] Updated weights for policy 0, policy_version 1018 (0.0034)
+[2024-09-06 08:36:47,299][01070] Fps is (10 sec: 3277.1, 60 sec: 2935.5, 300 sec: 2935.5). Total num frames: 4182016. Throughput: 0: 879.8. Samples: 41808. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-09-06 08:36:47,305][01070] Avg episode reward: [(0, '21.840')]
+[2024-09-06 08:36:47,316][19093] Saving new best policy, reward=21.840!
+[2024-09-06 08:36:52,299][01070] Fps is (10 sec: 4505.7, 60 sec: 3345.1, 300 sec: 3087.8). Total num frames: 4206592. Throughput: 0: 982.8. Samples: 48798. Policy #0 lag: (min: 0.0, avg: 0.3, max: 2.0)
+[2024-09-06 08:36:52,304][01070] Avg episode reward: [(0, '21.047')]
+[2024-09-06 08:36:52,709][19110] Updated weights for policy 0, policy_version 1028 (0.0017)
+[2024-09-06 08:36:57,304][01070] Fps is (10 sec: 4094.0, 60 sec: 3618.1, 300 sec: 3101.0). Total num frames: 4222976. Throughput: 0: 987.4. Samples: 54226. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2024-09-06 08:36:57,306][01070] Avg episode reward: [(0, '20.552')]
+[2024-09-06 08:37:02,299][01070] Fps is (10 sec: 2457.6, 60 sec: 3686.5, 300 sec: 3003.7). Total num frames: 4231168. Throughput: 0: 944.6. Samples: 55878. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2024-09-06 08:37:02,301][01070] Avg episode reward: [(0, '21.311')]
+[2024-09-06 08:37:07,058][19110] Updated weights for policy 0, policy_version 1038 (0.0038)
+[2024-09-06 08:37:07,299][01070] Fps is (10 sec: 2868.7, 60 sec: 3823.2, 300 sec: 3072.0). Total num frames: 4251648. Throughput: 0: 897.5. Samples: 59880. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2024-09-06 08:37:07,301][01070] Avg episode reward: [(0, '20.332')]
+[2024-09-06 08:37:12,299][01070] Fps is (10 sec: 4096.0, 60 sec: 3754.8, 300 sec: 3132.2). Total num frames: 4272128. Throughput: 0: 950.4. Samples: 66884. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-09-06 08:37:12,301][01070] Avg episode reward: [(0, '19.661')]
+[2024-09-06 08:37:17,299][01070] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3140.3). Total num frames: 4288512. Throughput: 0: 928.7. Samples: 69506. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-09-06 08:37:17,302][01070] Avg episode reward: [(0, '20.777')]
+[2024-09-06 08:37:18,168][19110] Updated weights for policy 0, policy_version 1048 (0.0027)
+[2024-09-06 08:37:22,299][01070] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3190.6). Total num frames: 4308992. Throughput: 0: 873.3. Samples: 74104. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-09-06 08:37:22,304][01070] Avg episode reward: [(0, '21.018')]
+[2024-09-06 08:37:27,299][01070] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3276.8). Total num frames: 4333568. Throughput: 0: 938.8. Samples: 81312. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-09-06 08:37:27,300][19110] Updated weights for policy 0, policy_version 1058 (0.0031)
+[2024-09-06 08:37:27,301][01070] Avg episode reward: [(0, '21.381')]
+[2024-09-06 08:37:27,314][19093] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001058_4333568.pth...
+[2024-09-06 08:37:27,438][19093] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000975_3993600.pth
+[2024-09-06 08:37:32,300][01070] Fps is (10 sec: 4095.5, 60 sec: 3686.3, 300 sec: 3276.8). Total num frames: 4349952. Throughput: 0: 954.6. Samples: 84768. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2024-09-06 08:37:32,306][01070] Avg episode reward: [(0, '20.498')]
+[2024-09-06 08:37:37,299][01070] Fps is (10 sec: 3276.8, 60 sec: 3618.2, 300 sec: 3276.8). Total num frames: 4366336. Throughput: 0: 899.8. Samples: 89288. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2024-09-06 08:37:37,302][01070] Avg episode reward: [(0, '20.356')]
+[2024-09-06 08:37:38,951][19110] Updated weights for policy 0, policy_version 1068 (0.0030)
+[2024-09-06 08:37:42,302][01070] Fps is (10 sec: 3685.8, 60 sec: 3754.5, 300 sec: 3312.3). Total num frames: 4386816. Throughput: 0: 918.0. Samples: 95532. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-09-06 08:37:42,306][01070] Avg episode reward: [(0, '18.966')]
+[2024-09-06 08:37:47,299][01070] Fps is (10 sec: 4505.6, 60 sec: 3823.0, 300 sec: 3379.2). Total num frames: 4411392. Throughput: 0: 960.9. Samples: 99118. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-09-06 08:37:47,303][01070] Avg episode reward: [(0, '19.345')]
+[2024-09-06 08:37:48,240][19110] Updated weights for policy 0, policy_version 1078 (0.0013)
+[2024-09-06 08:37:52,299][01070] Fps is (10 sec: 3687.5, 60 sec: 3618.1, 300 sec: 3342.3). Total num frames: 4423680. Throughput: 0: 989.1. Samples: 104388. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-09-06 08:37:52,301][01070] Avg episode reward: [(0, '19.443')]
+[2024-09-06 08:37:57,299][01070] Fps is (10 sec: 3686.4, 60 sec: 3755.0, 300 sec: 3402.8). Total num frames: 4448256. Throughput: 0: 957.7. Samples: 109982. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-09-06 08:37:57,301][01070] Avg episode reward: [(0, '20.099')]
+[2024-09-06 08:37:59,151][19110] Updated weights for policy 0, policy_version 1088 (0.0024)
+[2024-09-06 08:38:02,299][01070] Fps is (10 sec: 4505.6, 60 sec: 3959.5, 300 sec: 3428.5). Total num frames: 4468736. Throughput: 0: 978.0. Samples: 113514. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2024-09-06 08:38:02,306][01070] Avg episode reward: [(0, '20.360')]
+[2024-09-06 08:38:07,299][01070] Fps is (10 sec: 3686.4, 60 sec: 3891.2, 300 sec: 3423.1). Total num frames: 4485120. Throughput: 0: 1019.1. Samples: 119964. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2024-09-06 08:38:07,302][01070] Avg episode reward: [(0, '20.240')]
+[2024-09-06 08:38:10,312][19110] Updated weights for policy 0, policy_version 1098 (0.0022)
+[2024-09-06 08:38:12,299][01070] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3418.0). Total num frames: 4501504. Throughput: 0: 953.8. Samples: 124232. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2024-09-06 08:38:12,304][01070] Avg episode reward: [(0, '19.557')]
+[2024-09-06 08:38:17,299][01070] Fps is (10 sec: 4096.0, 60 sec: 3959.5, 300 sec: 3467.9). Total num frames: 4526080. Throughput: 0: 954.6. Samples: 127726. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-09-06 08:38:17,301][01070] Avg episode reward: [(0, '20.239')]
+[2024-09-06 08:38:19,439][19110] Updated weights for policy 0, policy_version 1108 (0.0041)
+[2024-09-06 08:38:22,301][01070] Fps is (10 sec: 4504.4, 60 sec: 3959.3, 300 sec: 3488.1). Total num frames: 4546560. Throughput: 0: 1011.7. Samples: 134818. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2024-09-06 08:38:22,304][01070] Avg episode reward: [(0, '20.506')]
+[2024-09-06 08:38:27,299][01070] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3481.6). Total num frames: 4562944. Throughput: 0: 977.9. Samples: 139534. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2024-09-06 08:38:27,303][01070] Avg episode reward: [(0, '20.585')]
+[2024-09-06 08:38:31,030][19110] Updated weights for policy 0, policy_version 1118 (0.0046)
+[2024-09-06 08:38:32,299][01070] Fps is (10 sec: 3687.4, 60 sec: 3891.3, 300 sec: 3500.2). Total num frames: 4583424. Throughput: 0: 953.9. Samples: 142044. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2024-09-06 08:38:32,306][01070] Avg episode reward: [(0, '20.308')]
+[2024-09-06 08:38:37,299][01070] Fps is (10 sec: 4505.6, 60 sec: 4027.7, 300 sec: 3541.8). Total num frames: 4608000. Throughput: 0: 993.6. Samples: 149102. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2024-09-06 08:38:37,304][01070] Avg episode reward: [(0, '21.252')]
+[2024-09-06 08:38:40,626][19110] Updated weights for policy 0, policy_version 1128 (0.0030)
+[2024-09-06 08:38:42,299][01070] Fps is (10 sec: 4096.0, 60 sec: 3959.7, 300 sec: 3534.3). Total num frames: 4624384. Throughput: 0: 994.4. Samples: 154730. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-09-06 08:38:42,303][01070] Avg episode reward: [(0, '20.914')]
+[2024-09-06 08:38:47,299][01070] Fps is (10 sec: 3276.7, 60 sec: 3822.9, 300 sec: 3527.1). Total num frames: 4640768. Throughput: 0: 963.1. Samples: 156854. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2024-09-06 08:38:47,307][01070] Avg episode reward: [(0, '20.844')]
+[2024-09-06 08:38:51,335][19110] Updated weights for policy 0, policy_version 1138 (0.0028)
+[2024-09-06 08:38:52,299][01070] Fps is (10 sec: 4095.9, 60 sec: 4027.7, 300 sec: 3564.6). Total num frames: 4665344. Throughput: 0: 963.3. Samples: 163312. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2024-09-06 08:38:52,301][01070] Avg episode reward: [(0, '22.493')]
+[2024-09-06 08:38:52,304][19093] Saving new best policy, reward=22.493!
+[2024-09-06 08:38:57,299][01070] Fps is (10 sec: 4505.7, 60 sec: 3959.5, 300 sec: 3578.6). Total num frames: 4685824. Throughput: 0: 1017.3. Samples: 170010. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2024-09-06 08:38:57,305][01070] Avg episode reward: [(0, '21.806')]
+[2024-09-06 08:39:02,299][01070] Fps is (10 sec: 3276.9, 60 sec: 3822.9, 300 sec: 3549.9). Total num frames: 4698112. Throughput: 0: 985.0. Samples: 172050. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2024-09-06 08:39:02,300][01070] Avg episode reward: [(0, '22.885')]
+[2024-09-06 08:39:02,305][19093] Saving new best policy, reward=22.885!
+[2024-09-06 08:39:02,950][19110] Updated weights for policy 0, policy_version 1148 (0.0035)
+[2024-09-06 08:39:07,299][01070] Fps is (10 sec: 3276.8, 60 sec: 3891.2, 300 sec: 3563.5). Total num frames: 4718592. Throughput: 0: 948.3. Samples: 177488. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-09-06 08:39:07,301][01070] Avg episode reward: [(0, '23.190')]
+[2024-09-06 08:39:07,318][19093] Saving new best policy, reward=23.190!
+[2024-09-06 08:39:11,864][19110] Updated weights for policy 0, policy_version 1158 (0.0024)
+[2024-09-06 08:39:12,299][01070] Fps is (10 sec: 4505.6, 60 sec: 4027.7, 300 sec: 3596.5). Total num frames: 4743168. Throughput: 0: 994.3. Samples: 184276. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2024-09-06 08:39:12,303][01070] Avg episode reward: [(0, '23.069')]
+[2024-09-06 08:39:17,299][01070] Fps is (10 sec: 4096.0, 60 sec: 3891.2, 300 sec: 3588.9). Total num frames: 4759552. Throughput: 0: 1001.5. Samples: 187112. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2024-09-06 08:39:17,301][01070] Avg episode reward: [(0, '22.125')]
+[2024-09-06 08:39:22,299][01070] Fps is (10 sec: 3276.8, 60 sec: 3823.1, 300 sec: 3581.6). Total num frames: 4775936. Throughput: 0: 941.3. Samples: 191462. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-09-06 08:39:22,303][01070] Avg episode reward: [(0, '21.027')]
+[2024-09-06 08:39:23,456][19110] Updated weights for policy 0, policy_version 1168 (0.0022)
+[2024-09-06 08:39:27,299][01070] Fps is (10 sec: 4096.0, 60 sec: 3959.5, 300 sec: 3611.9). Total num frames: 4800512. Throughput: 0: 973.6. Samples: 198540. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-09-06 08:39:27,301][01070] Avg episode reward: [(0, '19.180')]
+[2024-09-06 08:39:27,314][19093] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001172_4800512.pth...
+[2024-09-06 08:39:27,463][19093] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth
+[2024-09-06 08:39:32,300][01070] Fps is (10 sec: 4504.9, 60 sec: 3959.4, 300 sec: 3622.7). Total num frames: 4820992. Throughput: 0: 1000.5. Samples: 201878. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2024-09-06 08:39:32,303][01070] Avg episode reward: [(0, '20.069')]
+[2024-09-06 08:39:33,797][19110] Updated weights for policy 0, policy_version 1178 (0.0024)
+[2024-09-06 08:39:37,299][01070] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3597.4). Total num frames: 4833280. Throughput: 0: 959.6. Samples: 206492. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2024-09-06 08:39:37,301][01070] Avg episode reward: [(0, '19.170')]
+[2024-09-06 08:39:42,299][01070] Fps is (10 sec: 3687.0, 60 sec: 3891.2, 300 sec: 3625.4). Total num frames: 4857856. Throughput: 0: 947.1. Samples: 212628. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-09-06 08:39:42,306][01070] Avg episode reward: [(0, '20.382')]
+[2024-09-06 08:39:43,732][19110] Updated weights for policy 0, policy_version 1188 (0.0024)
+[2024-09-06 08:39:47,299][01070] Fps is (10 sec: 4505.6, 60 sec: 3959.5, 300 sec: 3635.2). Total num frames: 4878336. Throughput: 0: 980.8. Samples: 216188. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-09-06 08:39:47,313][01070] Avg episode reward: [(0, '21.641')]
+[2024-09-06 08:39:52,299][01070] Fps is (10 sec: 3686.4, 60 sec: 3823.0, 300 sec: 3627.9). Total num frames: 4894720. Throughput: 0: 984.4. Samples: 221786. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-09-06 08:39:52,301][01070] Avg episode reward: [(0, '21.803')]
+[2024-09-06 08:39:55,248][19110] Updated weights for policy 0, policy_version 1198 (0.0031)
+[2024-09-06 08:39:57,299][01070] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3637.2). Total num frames: 4915200. Throughput: 0: 951.6. Samples: 227098. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-09-06 08:39:57,304][01070] Avg episode reward: [(0, '22.439')]
+[2024-09-06 08:40:02,299][01070] Fps is (10 sec: 4505.6, 60 sec: 4027.7, 300 sec: 3662.3). Total num frames: 4939776. Throughput: 0: 968.4. Samples: 230692. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-09-06 08:40:02,305][01070] Avg episode reward: [(0, '23.387')]
+[2024-09-06 08:40:02,307][19093] Saving new best policy, reward=23.387!
+[2024-09-06 08:40:04,048][19110] Updated weights for policy 0, policy_version 1208 (0.0019)
+[2024-09-06 08:40:07,299][01070] Fps is (10 sec: 4096.0, 60 sec: 3959.5, 300 sec: 3654.9). Total num frames: 4956160. Throughput: 0: 1015.8. Samples: 237174. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-09-06 08:40:07,304][01070] Avg episode reward: [(0, '25.478')]
+[2024-09-06 08:40:07,312][19093] Saving new best policy, reward=25.478!
+[2024-09-06 08:40:12,299][01070] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3647.8). Total num frames: 4972544. Throughput: 0: 950.4. Samples: 241310. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-09-06 08:40:12,301][01070] Avg episode reward: [(0, '26.573')]
+[2024-09-06 08:40:12,304][19093] Saving new best policy, reward=26.573!
+[2024-09-06 08:40:15,918][19110] Updated weights for policy 0, policy_version 1218 (0.0034)
+[2024-09-06 08:40:17,299][01070] Fps is (10 sec: 3686.4, 60 sec: 3891.2, 300 sec: 3656.1). Total num frames: 4993024. Throughput: 0: 947.8. Samples: 244528. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-09-06 08:40:17,301][01070] Avg episode reward: [(0, '26.655')]
+[2024-09-06 08:40:17,310][19093] Saving new best policy, reward=26.655!
+[2024-09-06 08:40:19,413][19093] Stopping Batcher_0...
+[2024-09-06 08:40:19,413][01070] Component Batcher_0 stopped!
+[2024-09-06 08:40:19,415][19093] Loop batcher_evt_loop terminating...
+[2024-09-06 08:40:19,420][19093] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001222_5005312.pth...
+[2024-09-06 08:40:19,470][19110] Weights refcount: 2 0
+[2024-09-06 08:40:19,478][01070] Component InferenceWorker_p0-w0 stopped!
+[2024-09-06 08:40:19,481][19110] Stopping InferenceWorker_p0-w0...
+[2024-09-06 08:40:19,481][19110] Loop inference_proc0-0_evt_loop terminating...
+[2024-09-06 08:40:19,543][19093] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001058_4333568.pth
+[2024-09-06 08:40:19,557][19093] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001222_5005312.pth...
+[2024-09-06 08:40:19,761][01070] Component LearnerWorker_p0 stopped!
+[2024-09-06 08:40:19,761][19093] Stopping LearnerWorker_p0...
+[2024-09-06 08:40:19,766][19093] Loop learner_proc0_evt_loop terminating...
+[2024-09-06 08:40:19,774][01070] Component RolloutWorker_w3 stopped!
+[2024-09-06 08:40:19,777][19115] Stopping RolloutWorker_w3...
+[2024-09-06 08:40:19,779][19115] Loop rollout_proc3_evt_loop terminating...
+[2024-09-06 08:40:19,804][01070] Component RolloutWorker_w1 stopped!
+[2024-09-06 08:40:19,806][19111] Stopping RolloutWorker_w1...
+[2024-09-06 08:40:19,813][19111] Loop rollout_proc1_evt_loop terminating...
+[2024-09-06 08:40:19,874][01070] Component RolloutWorker_w7 stopped!
+[2024-09-06 08:40:19,880][19117] Stopping RolloutWorker_w7...
+[2024-09-06 08:40:19,888][19117] Loop rollout_proc7_evt_loop terminating...
+[2024-09-06 08:40:19,902][01070] Component RolloutWorker_w5 stopped!
+[2024-09-06 08:40:19,904][19116] Stopping RolloutWorker_w5...
+[2024-09-06 08:40:19,909][19116] Loop rollout_proc5_evt_loop terminating...
+[2024-09-06 08:40:20,003][19118] Stopping RolloutWorker_w6...
+[2024-09-06 08:40:20,003][19118] Loop rollout_proc6_evt_loop terminating...
+[2024-09-06 08:40:20,003][01070] Component RolloutWorker_w6 stopped!
+[2024-09-06 08:40:20,035][19113] Stopping RolloutWorker_w2...
+[2024-09-06 08:40:20,037][19113] Loop rollout_proc2_evt_loop terminating...
+[2024-09-06 08:40:20,035][01070] Component RolloutWorker_w2 stopped!
+[2024-09-06 08:40:20,058][19114] Stopping RolloutWorker_w4...
+[2024-09-06 08:40:20,057][01070] Component RolloutWorker_w4 stopped!
+[2024-09-06 08:40:20,062][19114] Loop rollout_proc4_evt_loop terminating...
+[2024-09-06 08:40:20,066][19112] Stopping RolloutWorker_w0...
+[2024-09-06 08:40:20,066][01070] Component RolloutWorker_w0 stopped!
+[2024-09-06 08:40:20,068][01070] Waiting for process learner_proc0 to stop...
+[2024-09-06 08:40:20,078][19112] Loop rollout_proc0_evt_loop terminating...
+[2024-09-06 08:40:21,230][01070] Waiting for process inference_proc0-0 to join...
+[2024-09-06 08:40:21,233][01070] Waiting for process rollout_proc0 to join...
+[2024-09-06 08:40:24,143][01070] Waiting for process rollout_proc1 to join...
+[2024-09-06 08:40:24,148][01070] Waiting for process rollout_proc2 to join...
+[2024-09-06 08:40:24,150][01070] Waiting for process rollout_proc3 to join...
+[2024-09-06 08:40:24,153][01070] Waiting for process rollout_proc4 to join...
+[2024-09-06 08:40:24,159][01070] Waiting for process rollout_proc5 to join...
+[2024-09-06 08:40:24,162][01070] Waiting for process rollout_proc6 to join...
+[2024-09-06 08:40:24,165][01070] Waiting for process rollout_proc7 to join...
+[2024-09-06 08:40:24,168][01070] Batcher 0 profile tree view:
+batching: 7.2186, releasing_batches: 0.0064
+[2024-09-06 08:40:24,170][01070] InferenceWorker_p0-w0 profile tree view:
+wait_policy: 0.0054
+wait_policy_total: 102.6532
+update_model: 2.2548
+weight_update: 0.0030
+one_step: 0.0026
+handle_policy_step: 154.4121
+deserialize: 3.6770, stack: 0.8213, obs_to_device_normalize: 31.1067, forward: 82.7946, send_messages: 7.5294
+prepare_outputs: 21.0675
+to_cpu: 12.5076
+[2024-09-06 08:40:24,171][01070] Learner 0 profile tree view:
+misc: 0.0012, prepare_batch: 5.2823
+train: 21.8730
+epoch_init: 0.0014, minibatch_init: 0.0016, losses_postprocess: 0.1603, kl_divergence: 0.2048, after_optimizer: 0.9110
+calculate_losses: 8.7013
+losses_init: 0.0013, forward_head: 0.6601, bptt_initial: 6.1101, tail: 0.3325, advantages_returns: 0.0894, losses: 0.9715
+bptt: 0.4678
+bptt_forward_core: 0.4488
+update: 11.7699
+clip: 0.2453
+[2024-09-06 08:40:24,173][01070] RolloutWorker_w0 profile tree view:
+wait_for_trajectories: 0.0619, enqueue_policy_requests: 23.3356, env_step: 206.1955, overhead: 3.3833, complete_rollouts: 1.8563
+save_policy_outputs: 5.2619
+split_output_tensors: 2.1953
+[2024-09-06 08:40:24,175][01070] RolloutWorker_w7 profile tree view:
+wait_for_trajectories: 0.0745, enqueue_policy_requests: 24.4494, env_step: 203.3563, overhead: 3.1667, complete_rollouts: 1.6788
+save_policy_outputs: 4.9515
+split_output_tensors: 2.0046
+[2024-09-06 08:40:24,177][01070] Loop Runner_EvtLoop terminating...
+[2024-09-06 08:40:24,179][01070] Runner profile tree view:
+main_loop: 294.6944
+[2024-09-06 08:40:24,180][01070] Collected {0: 5005312}, FPS: 3391.4
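The final FPS figure is simply the environment frames collected in this session divided by the main-loop wall time, both of which appear in the log above:

```python
# "Collected {0: 5005312}, FPS: 3391.4" is this session's frames over wall time.
env_steps_start = 4005888     # "Loaded experiment state at ... env_steps=4005888"
env_steps_end = 5005312       # "Collected {0: 5005312}"
main_loop_seconds = 294.6944  # "main_loop: 294.6944"

fps = (env_steps_end - env_steps_start) / main_loop_seconds
print(f"{fps:.1f}")           # 3391.4
```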
1605 |
+
[2024-09-06 08:56:44,318][01070] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
|
1606 |
+
[2024-09-06 08:56:44,320][01070] Overriding arg 'num_workers' with value 1 passed from command line
|
1607 |
+
[2024-09-06 08:56:44,322][01070] Adding new argument 'no_render'=True that is not in the saved config file!
|
1608 |
+
[2024-09-06 08:56:44,324][01070] Adding new argument 'save_video'=True that is not in the saved config file!
|
1609 |
+
[2024-09-06 08:56:44,326][01070] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
|
1610 |
+
[2024-09-06 08:56:44,327][01070] Adding new argument 'video_name'=None that is not in the saved config file!
|
1611 |
+
[2024-09-06 08:56:44,328][01070] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
|
1612 |
+
[2024-09-06 08:56:44,329][01070] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
|
1613 |
+
[2024-09-06 08:56:44,331][01070] Adding new argument 'push_to_hub'=False that is not in the saved config file!
|
1614 |
+
[2024-09-06 08:56:44,332][01070] Adding new argument 'hf_repository'=None that is not in the saved config file!
|
1615 |
+
[2024-09-06 08:56:44,333][01070] Adding new argument 'policy_index'=0 that is not in the saved config file!
|
1616 |
+
[2024-09-06 08:56:44,334][01070] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
|
1617 |
+
[2024-09-06 08:56:44,339][01070] Adding new argument 'train_script'=None that is not in the saved config file!
|
1618 |
+
[2024-09-06 08:56:44,340][01070] Adding new argument 'enjoy_script'=None that is not in the saved config file!
|
1619 |
+
[2024-09-06 08:56:44,341][01070] Using frameskip 1 and render_action_repeat=4 for evaluation
|
1620 |
+
[2024-09-06 08:56:44,368][01070] RunningMeanStd input shape: (3, 72, 128)
|
1621 |
+
[2024-09-06 08:56:44,370][01070] RunningMeanStd input shape: (1,)
|
1622 |
+
[2024-09-06 08:56:44,390][01070] ConvEncoder: input_channels=3
|
1623 |
+
[2024-09-06 08:56:44,435][01070] Conv encoder output size: 512
|
1624 |
+
[2024-09-06 08:56:44,436][01070] Policy head output size: 512
|
1625 |
+
[2024-09-06 08:56:44,457][01070] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001222_5005312.pth...
|
1626 |
+
[2024-09-06 08:56:44,883][01070] Num frames 100...
|
1627 |
+
[2024-09-06 08:56:45,006][01070] Num frames 200...
|
1628 |
+
[2024-09-06 08:56:45,128][01070] Num frames 300...
|
1629 |
+
[2024-09-06 08:56:45,247][01070] Num frames 400...
|
1630 |
+
[2024-09-06 08:56:45,367][01070] Num frames 500...
|
1631 |
+
[2024-09-06 08:56:45,497][01070] Num frames 600...
|
1632 |
+
[2024-09-06 08:56:45,618][01070] Num frames 700...
|
1633 |
+
[2024-09-06 08:56:45,741][01070] Num frames 800...
|
1634 |
+
[2024-09-06 08:56:45,860][01070] Num frames 900...
|
1635 |
+
[2024-09-06 08:56:45,978][01070] Num frames 1000...
|
1636 |
+
[2024-09-06 08:56:46,102][01070] Num frames 1100...
|
1637 |
+
[2024-09-06 08:56:46,224][01070] Num frames 1200...
|
1638 |
+
[2024-09-06 08:56:46,348][01070] Num frames 1300...
|
1639 |
+
[2024-09-06 08:56:46,479][01070] Num frames 1400...
|
1640 |
+
[2024-09-06 08:56:46,606][01070] Num frames 1500...
|
1641 |
+
[2024-09-06 08:56:46,726][01070] Num frames 1600...
|
1642 |
+
[2024-09-06 08:56:46,845][01070] Num frames 1700...
|
1643 |
+
[2024-09-06 08:56:46,967][01070] Num frames 1800...
|
1644 |
+
[2024-09-06 08:56:47,110][01070] Avg episode rewards: #0: 46.719, true rewards: #0: 18.720
|
1645 |
+
[2024-09-06 08:56:47,113][01070] Avg episode reward: 46.719, avg true_objective: 18.720
[2024-09-06 08:56:47,150][01070] Num frames 1900...
[2024-09-06 08:56:47,268][01070] Num frames 2000...
[2024-09-06 08:56:47,388][01070] Num frames 2100...
[2024-09-06 08:56:47,523][01070] Num frames 2200...
[2024-09-06 08:56:47,671][01070] Num frames 2300...
[2024-09-06 08:56:47,751][01070] Avg episode rewards: #0: 27.100, true rewards: #0: 11.600
[2024-09-06 08:56:47,752][01070] Avg episode reward: 27.100, avg true_objective: 11.600
[2024-09-06 08:56:47,854][01070] Num frames 2400...
[2024-09-06 08:56:47,975][01070] Num frames 2500...
[2024-09-06 08:56:48,094][01070] Num frames 2600...
[2024-09-06 08:56:48,217][01070] Num frames 2700...
[2024-09-06 08:56:48,342][01070] Num frames 2800...
[2024-09-06 08:56:48,467][01070] Num frames 2900...
[2024-09-06 08:56:48,602][01070] Num frames 3000...
[2024-09-06 08:56:48,749][01070] Num frames 3100...
[2024-09-06 08:56:48,919][01070] Num frames 3200...
[2024-09-06 08:56:49,093][01070] Num frames 3300...
[2024-09-06 08:56:49,273][01070] Avg episode rewards: #0: 26.253, true rewards: #0: 11.253
[2024-09-06 08:56:49,277][01070] Avg episode reward: 26.253, avg true_objective: 11.253
[2024-09-06 08:56:49,319][01070] Num frames 3400...
[2024-09-06 08:56:49,496][01070] Num frames 3500...
[2024-09-06 08:56:49,661][01070] Num frames 3600...
[2024-09-06 08:56:49,822][01070] Num frames 3700...
[2024-09-06 08:56:50,036][01070] Avg episode rewards: #0: 21.230, true rewards: #0: 9.480
[2024-09-06 08:56:50,038][01070] Avg episode reward: 21.230, avg true_objective: 9.480
[2024-09-06 08:56:50,057][01070] Num frames 3800...
[2024-09-06 08:56:50,224][01070] Num frames 3900...
[2024-09-06 08:56:50,397][01070] Num frames 4000...
[2024-09-06 08:56:50,585][01070] Num frames 4100...
[2024-09-06 08:56:50,755][01070] Num frames 4200...
[2024-09-06 08:56:50,944][01070] Num frames 4300...
[2024-09-06 08:56:51,125][01070] Num frames 4400...
[2024-09-06 08:56:51,259][01070] Num frames 4500...
[2024-09-06 08:56:51,382][01070] Num frames 4600...
[2024-09-06 08:56:51,504][01070] Num frames 4700...
[2024-09-06 08:56:51,623][01070] Num frames 4800...
[2024-09-06 08:56:51,752][01070] Num frames 4900...
[2024-09-06 08:56:51,870][01070] Num frames 5000...
[2024-09-06 08:56:51,988][01070] Num frames 5100...
[2024-09-06 08:56:52,111][01070] Num frames 5200...
[2024-09-06 08:56:52,228][01070] Num frames 5300...
[2024-09-06 08:56:52,393][01070] Avg episode rewards: #0: 24.784, true rewards: #0: 10.784
[2024-09-06 08:56:52,394][01070] Avg episode reward: 24.784, avg true_objective: 10.784
[2024-09-06 08:56:52,408][01070] Num frames 5400...
[2024-09-06 08:56:52,534][01070] Num frames 5500...
[2024-09-06 08:56:52,657][01070] Num frames 5600...
[2024-09-06 08:56:52,788][01070] Num frames 5700...
[2024-09-06 08:56:52,907][01070] Num frames 5800...
[2024-09-06 08:56:53,028][01070] Num frames 5900...
[2024-09-06 08:56:53,148][01070] Num frames 6000...
[2024-09-06 08:56:53,268][01070] Num frames 6100...
[2024-09-06 08:56:53,388][01070] Num frames 6200...
[2024-09-06 08:56:53,515][01070] Num frames 6300...
[2024-09-06 08:56:53,638][01070] Num frames 6400...
[2024-09-06 08:56:53,779][01070] Num frames 6500...
[2024-09-06 08:56:53,901][01070] Num frames 6600...
[2024-09-06 08:56:54,025][01070] Num frames 6700...
[2024-09-06 08:56:54,146][01070] Num frames 6800...
[2024-09-06 08:56:54,268][01070] Num frames 6900...
[2024-09-06 08:56:54,390][01070] Num frames 7000...
[2024-09-06 08:56:54,560][01070] Avg episode rewards: #0: 28.313, true rewards: #0: 11.813
[2024-09-06 08:56:54,562][01070] Avg episode reward: 28.313, avg true_objective: 11.813
[2024-09-06 08:56:54,581][01070] Num frames 7100...
[2024-09-06 08:56:54,699][01070] Num frames 7200...
[2024-09-06 08:56:54,827][01070] Num frames 7300...
[2024-09-06 08:56:54,945][01070] Num frames 7400...
[2024-09-06 08:56:55,064][01070] Num frames 7500...
[2024-09-06 08:56:55,188][01070] Num frames 7600...
[2024-09-06 08:56:55,281][01070] Avg episode rewards: #0: 25.760, true rewards: #0: 10.903
[2024-09-06 08:56:55,283][01070] Avg episode reward: 25.760, avg true_objective: 10.903
[2024-09-06 08:56:55,366][01070] Num frames 7700...
[2024-09-06 08:56:55,489][01070] Num frames 7800...
[2024-09-06 08:56:55,612][01070] Num frames 7900...
[2024-09-06 08:56:55,732][01070] Num frames 8000...
[2024-09-06 08:56:55,892][01070] Avg episode rewards: #0: 23.600, true rewards: #0: 10.100
[2024-09-06 08:56:55,894][01070] Avg episode reward: 23.600, avg true_objective: 10.100
[2024-09-06 08:56:55,921][01070] Num frames 8100...
[2024-09-06 08:56:56,039][01070] Num frames 8200...
[2024-09-06 08:56:56,159][01070] Num frames 8300...
[2024-09-06 08:56:56,280][01070] Num frames 8400...
[2024-09-06 08:56:56,402][01070] Num frames 8500...
[2024-09-06 08:56:56,532][01070] Num frames 8600...
[2024-09-06 08:56:56,656][01070] Num frames 8700...
[2024-09-06 08:56:56,782][01070] Num frames 8800...
[2024-09-06 08:56:56,911][01070] Num frames 8900...
[2024-09-06 08:56:57,036][01070] Num frames 9000...
[2024-09-06 08:56:57,162][01070] Num frames 9100...
[2024-09-06 08:56:57,286][01070] Num frames 9200...
[2024-09-06 08:56:57,409][01070] Num frames 9300...
[2024-09-06 08:56:57,498][01070] Avg episode rewards: #0: 24.253, true rewards: #0: 10.364
[2024-09-06 08:56:57,500][01070] Avg episode reward: 24.253, avg true_objective: 10.364
[2024-09-06 08:56:57,588][01070] Num frames 9400...
[2024-09-06 08:56:57,710][01070] Num frames 9500...
[2024-09-06 08:56:57,848][01070] Num frames 9600...
[2024-09-06 08:56:57,973][01070] Num frames 9700...
[2024-09-06 08:56:58,097][01070] Num frames 9800...
[2024-09-06 08:56:58,209][01070] Avg episode rewards: #0: 22.746, true rewards: #0: 9.846
[2024-09-06 08:56:58,211][01070] Avg episode reward: 22.746, avg true_objective: 9.846
[2024-09-06 08:57:59,802][01070] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
[2024-09-06 09:00:55,778][01070] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
[2024-09-06 09:00:55,780][01070] Overriding arg 'num_workers' with value 1 passed from command line
[2024-09-06 09:00:55,782][01070] Adding new argument 'no_render'=True that is not in the saved config file!
[2024-09-06 09:00:55,784][01070] Adding new argument 'save_video'=True that is not in the saved config file!
[2024-09-06 09:00:55,786][01070] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
[2024-09-06 09:00:55,789][01070] Adding new argument 'video_name'=None that is not in the saved config file!
[2024-09-06 09:00:55,791][01070] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
[2024-09-06 09:00:55,792][01070] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
[2024-09-06 09:00:55,793][01070] Adding new argument 'push_to_hub'=True that is not in the saved config file!
[2024-09-06 09:00:55,794][01070] Adding new argument 'hf_repository'='Re-Re/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
[2024-09-06 09:00:55,795][01070] Adding new argument 'policy_index'=0 that is not in the saved config file!
[2024-09-06 09:00:55,796][01070] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
[2024-09-06 09:00:55,797][01070] Adding new argument 'train_script'=None that is not in the saved config file!
[2024-09-06 09:00:55,798][01070] Adding new argument 'enjoy_script'=None that is not in the saved config file!
[2024-09-06 09:00:55,799][01070] Using frameskip 1 and render_action_repeat=4 for evaluation
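
The overrides above describe the push-to-hub pass: the same enjoy script is re-run with push_to_hub set and the target hf_repository named, so after recording it uploads the replay and evaluation metrics. A sketch of how such a run is typically launched from Python with Sample Factory 2.x (the register/parse helper names follow Sample Factory's ViZDoom example and are assumptions to verify against the installed version):

# Sketch only: launching the evaluation/upload pass that produced this log.
from sf_examples.vizdoom.train_vizdoom import (  # assumed helper locations
    register_vizdoom_components,
    parse_vizdoom_args,
)
from sample_factory.enjoy import enjoy

register_vizdoom_components()
cfg = parse_vizdoom_args(
    argv=[
        "--env=doom_health_gathering_supreme",
        "--experiment=default_experiment",
        "--train_dir=/content/train_dir",
        "--num_workers=1",
        "--no_render",
        "--save_video",
        "--max_num_frames=100000",
        "--max_num_episodes=10",
        "--push_to_hub",
        "--hf_repository=Re-Re/rl_course_vizdoom_health_gathering_supreme",
    ],
    evaluation=True,  # enables the enjoy-only arguments logged above
)
status = enjoy(cfg)  # loads the newest checkpoint, records replay.mp4, uploads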
[2024-09-06 09:00:55,830][01070] RunningMeanStd input shape: (3, 72, 128)
[2024-09-06 09:00:55,832][01070] RunningMeanStd input shape: (1,)
[2024-09-06 09:00:55,846][01070] ConvEncoder: input_channels=3
[2024-09-06 09:00:55,882][01070] Conv encoder output size: 512
[2024-09-06 09:00:55,884][01070] Policy head output size: 512
[2024-09-06 09:00:55,904][01070] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001222_5005312.pth...
[2024-09-06 09:00:56,320][01070] Num frames 100...
[2024-09-06 09:00:56,441][01070] Num frames 200...
[2024-09-06 09:00:56,597][01070] Num frames 300...
[2024-09-06 09:00:56,715][01070] Num frames 400...
[2024-09-06 09:00:56,840][01070] Num frames 500...
[2024-09-06 09:00:56,960][01070] Num frames 600...
[2024-09-06 09:00:57,080][01070] Num frames 700...
[2024-09-06 09:00:57,182][01070] Avg episode rewards: #0: 16.380, true rewards: #0: 7.380
[2024-09-06 09:00:57,184][01070] Avg episode reward: 16.380, avg true_objective: 7.380
[2024-09-06 09:00:57,264][01070] Num frames 800...
[2024-09-06 09:00:57,399][01070] Num frames 900...
[2024-09-06 09:00:57,529][01070] Num frames 1000...
[2024-09-06 09:00:57,650][01070] Num frames 1100...
[2024-09-06 09:00:57,775][01070] Num frames 1200...
[2024-09-06 09:00:57,898][01070] Num frames 1300...
[2024-09-06 09:00:58,016][01070] Num frames 1400...
[2024-09-06 09:00:58,136][01070] Num frames 1500...
[2024-09-06 09:00:58,255][01070] Num frames 1600...
[2024-09-06 09:00:58,314][01070] Avg episode rewards: #0: 18.010, true rewards: #0: 8.010
[2024-09-06 09:00:58,316][01070] Avg episode reward: 18.010, avg true_objective: 8.010
[2024-09-06 09:00:58,433][01070] Num frames 1700...
[2024-09-06 09:00:58,562][01070] Num frames 1800...
[2024-09-06 09:00:58,686][01070] Num frames 1900...
[2024-09-06 09:00:58,804][01070] Num frames 2000...
[2024-09-06 09:00:58,927][01070] Num frames 2100...
[2024-09-06 09:00:59,047][01070] Num frames 2200...
[2024-09-06 09:00:59,165][01070] Num frames 2300...
[2024-09-06 09:00:59,285][01070] Num frames 2400...
[2024-09-06 09:00:59,413][01070] Num frames 2500...
[2024-09-06 09:00:59,544][01070] Num frames 2600...
[2024-09-06 09:00:59,669][01070] Num frames 2700...
[2024-09-06 09:00:59,789][01070] Num frames 2800...
[2024-09-06 09:00:59,937][01070] Num frames 2900...
[2024-09-06 09:01:00,112][01070] Num frames 3000...
[2024-09-06 09:01:00,278][01070] Num frames 3100...
[2024-09-06 09:01:00,453][01070] Num frames 3200...
[2024-09-06 09:01:00,623][01070] Num frames 3300...
[2024-09-06 09:01:00,786][01070] Num frames 3400...
[2024-09-06 09:01:00,952][01070] Num frames 3500...
[2024-09-06 09:01:01,119][01070] Num frames 3600...
[2024-09-06 09:01:01,294][01070] Num frames 3700...
[2024-09-06 09:01:01,356][01070] Avg episode rewards: #0: 30.673, true rewards: #0: 12.340
[2024-09-06 09:01:01,357][01070] Avg episode reward: 30.673, avg true_objective: 12.340
[2024-09-06 09:01:01,535][01070] Num frames 3800...
[2024-09-06 09:01:01,711][01070] Num frames 3900...
[2024-09-06 09:01:01,881][01070] Num frames 4000...
[2024-09-06 09:01:02,049][01070] Num frames 4100...
[2024-09-06 09:01:02,222][01070] Num frames 4200...
[2024-09-06 09:01:02,396][01070] Num frames 4300...
[2024-09-06 09:01:02,534][01070] Num frames 4400...
[2024-09-06 09:01:02,655][01070] Num frames 4500...
[2024-09-06 09:01:02,776][01070] Num frames 4600...
[2024-09-06 09:01:02,898][01070] Num frames 4700...
[2024-09-06 09:01:03,020][01070] Num frames 4800...
[2024-09-06 09:01:03,140][01070] Num frames 4900...
[2024-09-06 09:01:03,261][01070] Num frames 5000...
[2024-09-06 09:01:03,383][01070] Num frames 5100...
[2024-09-06 09:01:03,519][01070] Num frames 5200...
[2024-09-06 09:01:03,641][01070] Num frames 5300...
[2024-09-06 09:01:03,817][01070] Avg episode rewards: #0: 34.225, true rewards: #0: 13.475
[2024-09-06 09:01:03,818][01070] Avg episode reward: 34.225, avg true_objective: 13.475
[2024-09-06 09:01:03,835][01070] Num frames 5400...
[2024-09-06 09:01:03,956][01070] Num frames 5500...
[2024-09-06 09:01:04,074][01070] Num frames 5600...
[2024-09-06 09:01:04,194][01070] Num frames 5700...
[2024-09-06 09:01:04,314][01070] Num frames 5800...
[2024-09-06 09:01:04,436][01070] Num frames 5900...
[2024-09-06 09:01:04,574][01070] Num frames 6000...
[2024-09-06 09:01:04,693][01070] Num frames 6100...
[2024-09-06 09:01:04,810][01070] Num frames 6200...
[2024-09-06 09:01:04,934][01070] Num frames 6300...
[2024-09-06 09:01:05,057][01070] Num frames 6400...
[2024-09-06 09:01:05,177][01070] Num frames 6500...
[2024-09-06 09:01:05,296][01070] Num frames 6600...
[2024-09-06 09:01:05,424][01070] Num frames 6700...
[2024-09-06 09:01:05,562][01070] Num frames 6800...
[2024-09-06 09:01:05,698][01070] Num frames 6900...
[2024-09-06 09:01:05,816][01070] Num frames 7000...
[2024-09-06 09:01:05,936][01070] Num frames 7100...
[2024-09-06 09:01:06,057][01070] Num frames 7200...
[2024-09-06 09:01:06,179][01070] Num frames 7300...
[2024-09-06 09:01:06,303][01070] Num frames 7400...
[2024-09-06 09:01:06,469][01070] Avg episode rewards: #0: 38.779, true rewards: #0: 14.980
[2024-09-06 09:01:06,474][01070] Avg episode reward: 38.779, avg true_objective: 14.980
[2024-09-06 09:01:06,493][01070] Num frames 7500...
[2024-09-06 09:01:06,629][01070] Num frames 7600...
[2024-09-06 09:01:06,753][01070] Num frames 7700...
[2024-09-06 09:01:06,875][01070] Num frames 7800...
[2024-09-06 09:01:06,995][01070] Num frames 7900...
[2024-09-06 09:01:07,118][01070] Num frames 8000...
[2024-09-06 09:01:07,240][01070] Num frames 8100...
[2024-09-06 09:01:07,359][01070] Num frames 8200...
[2024-09-06 09:01:07,476][01070] Avg episode rewards: #0: 34.920, true rewards: #0: 13.753
[2024-09-06 09:01:07,478][01070] Avg episode reward: 34.920, avg true_objective: 13.753
[2024-09-06 09:01:07,541][01070] Num frames 8300...
[2024-09-06 09:01:07,670][01070] Num frames 8400...
[2024-09-06 09:01:07,789][01070] Num frames 8500...
[2024-09-06 09:01:07,912][01070] Num frames 8600...
[2024-09-06 09:01:08,030][01070] Num frames 8700...
[2024-09-06 09:01:08,150][01070] Num frames 8800...
[2024-09-06 09:01:08,275][01070] Num frames 8900...
[2024-09-06 09:01:08,400][01070] Avg episode rewards: #0: 31.794, true rewards: #0: 12.794
[2024-09-06 09:01:08,402][01070] Avg episode reward: 31.794, avg true_objective: 12.794
[2024-09-06 09:01:08,457][01070] Num frames 9000...
[2024-09-06 09:01:08,587][01070] Num frames 9100...
[2024-09-06 09:01:08,723][01070] Num frames 9200...
[2024-09-06 09:01:08,851][01070] Num frames 9300...
[2024-09-06 09:01:08,979][01070] Num frames 9400...
[2024-09-06 09:01:09,106][01070] Num frames 9500...
[2024-09-06 09:01:09,230][01070] Num frames 9600...
[2024-09-06 09:01:09,356][01070] Num frames 9700...
[2024-09-06 09:01:09,487][01070] Num frames 9800...
[2024-09-06 09:01:09,614][01070] Num frames 9900...
[2024-09-06 09:01:09,745][01070] Num frames 10000...
[2024-09-06 09:01:09,870][01070] Num frames 10100...
[2024-09-06 09:01:09,995][01070] Num frames 10200...
[2024-09-06 09:01:10,118][01070] Num frames 10300...
[2024-09-06 09:01:10,238][01070] Num frames 10400...
[2024-09-06 09:01:10,360][01070] Num frames 10500...
[2024-09-06 09:01:10,493][01070] Num frames 10600...
[2024-09-06 09:01:10,614][01070] Num frames 10700...
[2024-09-06 09:01:10,741][01070] Num frames 10800...
[2024-09-06 09:01:10,866][01070] Num frames 10900...
[2024-09-06 09:01:10,993][01070] Num frames 11000...
[2024-09-06 09:01:11,116][01070] Avg episode rewards: #0: 34.820, true rewards: #0: 13.820
[2024-09-06 09:01:11,118][01070] Avg episode reward: 34.820, avg true_objective: 13.820
[2024-09-06 09:01:11,175][01070] Num frames 11100...
[2024-09-06 09:01:11,294][01070] Num frames 11200...
[2024-09-06 09:01:11,414][01070] Num frames 11300...
[2024-09-06 09:01:11,549][01070] Num frames 11400...
[2024-09-06 09:01:11,615][01070] Avg episode rewards: #0: 31.453, true rewards: #0: 12.676
[2024-09-06 09:01:11,617][01070] Avg episode reward: 31.453, avg true_objective: 12.676
[2024-09-06 09:01:11,740][01070] Num frames 11500...
[2024-09-06 09:01:11,869][01070] Num frames 11600...
[2024-09-06 09:01:11,989][01070] Num frames 11700...
[2024-09-06 09:01:12,106][01070] Num frames 11800...
[2024-09-06 09:01:12,231][01070] Avg episode rewards: #0: 29.156, true rewards: #0: 11.856
[2024-09-06 09:01:12,233][01070] Avg episode reward: 29.156, avg true_objective: 11.856
[2024-09-06 09:02:25,893][01070] Replay video saved to /content/train_dir/default_experiment/replay.mp4!