{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9564211368560791,
"min": 0.9564211368560791,
"max": 2.858450174331665,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9163.470703125,
"min": 9163.470703125,
"max": 29462.046875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.671629905700684,
"min": 0.43412551283836365,
"max": 12.671629905700684,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2470.9677734375,
"min": 84.22035217285156,
"max": 2560.0986328125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07294110309396952,
"min": 0.06063233535421793,
"max": 0.07611921997101721,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2917644123758781,
"min": 0.2425293414168717,
"max": 0.3805960998550861,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21051914438459218,
"min": 0.12349868572189235,
"max": 0.2788114281846028,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8420765775383687,
"min": 0.4939947428875694,
"max": 1.3239582934800316,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.022727272727273,
"min": 3.6818181818181817,
"max": 25.09090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1101.0,
"min": 162.0,
"max": 1380.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.022727272727273,
"min": 3.6818181818181817,
"max": 25.09090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1101.0,
"min": 162.0,
"max": 1380.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691030868",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691031345"
},
"total": 477.3259838849999,
"count": 1,
"self": 0.7902889380000033,
"children": {
"run_training.setup": {
"total": 0.07688031199995748,
"count": 1,
"self": 0.07688031199995748
},
"TrainerController.start_learning": {
"total": 476.45881463499995,
"count": 1,
"self": 0.5184033049927166,
"children": {
"TrainerController._reset_env": {
"total": 5.854089920999968,
"count": 1,
"self": 5.854089920999968
},
"TrainerController.advance": {
"total": 469.8534997000072,
"count": 18220,
"self": 0.2674291980065391,
"children": {
"env_step": {
"total": 469.58607050200067,
"count": 18220,
"self": 339.2855762299964,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.03102525199506,
"count": 18220,
"self": 1.700754423996159,
"children": {
"TorchPolicy.evaluate": {
"total": 128.3302708279989,
"count": 18220,
"self": 128.3302708279989
}
}
},
"workers": {
"total": 0.2694690200091827,
"count": 18220,
"self": 0.0,
"children": {
"worker_root": {
"total": 474.83317779700656,
"count": 18220,
"is_parallel": true,
"self": 229.7680240110144,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006885007999926529,
"count": 1,
"is_parallel": true,
"self": 0.003935755999691537,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002949252000234992,
"count": 10,
"is_parallel": true,
"self": 0.002949252000234992
}
}
},
"UnityEnvironment.step": {
"total": 0.03473839899993436,
"count": 1,
"is_parallel": true,
"self": 0.0004158550000283867,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004339649999565154,
"count": 1,
"is_parallel": true,
"self": 0.0004339649999565154
},
"communicator.exchange": {
"total": 0.0318985359999715,
"count": 1,
"is_parallel": true,
"self": 0.0318985359999715
},
"steps_from_proto": {
"total": 0.001990042999977959,
"count": 1,
"is_parallel": true,
"self": 0.00036317500007498893,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00162686799990297,
"count": 10,
"is_parallel": true,
"self": 0.00162686799990297
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 245.06515378599215,
"count": 18219,
"is_parallel": true,
"self": 10.424174261988128,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.570902053998907,
"count": 18219,
"is_parallel": true,
"self": 5.570902053998907
},
"communicator.exchange": {
"total": 193.63325762200202,
"count": 18219,
"is_parallel": true,
"self": 193.63325762200202
},
"steps_from_proto": {
"total": 35.4368198480031,
"count": 18219,
"is_parallel": true,
"self": 6.427021258946752,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.009798589056345,
"count": 182190,
"is_parallel": true,
"self": 29.009798589056345
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011233700001866964,
"count": 1,
"self": 0.00011233700001866964,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 466.10962684006245,
"count": 443604,
"is_parallel": true,
"self": 10.071502579919297,
"children": {
"process_trajectory": {
"total": 252.89972915714225,
"count": 443604,
"is_parallel": true,
"self": 251.28965325214222,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6100759050000306,
"count": 4,
"is_parallel": true,
"self": 1.6100759050000306
}
}
},
"_update_policy": {
"total": 203.1383951030009,
"count": 90,
"is_parallel": true,
"self": 81.84818959699874,
"children": {
"TorchPPOOptimizer.update": {
"total": 121.29020550600217,
"count": 4584,
"is_parallel": true,
"self": 121.29020550600217
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.23270937200004482,
"count": 1,
"self": 0.0013543070001560409,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23135506499988878,
"count": 1,
"self": 0.23135506499988878
}
}
}
}
}
}
}