{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8118503093719482,
"min": 0.8118503093719482,
"max": 2.8699111938476562,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7769.4072265625,
"min": 7769.4072265625,
"max": 29390.76171875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.745680809020996,
"min": 0.2854953408241272,
"max": 11.745680809020996,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2290.40771484375,
"min": 55.3860969543457,
"max": 2372.05810546875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06845606946273405,
"min": 0.06284241833311492,
"max": 0.07690054145322053,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2738242778509362,
"min": 0.2513696733324597,
"max": 0.38450270726610264,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18841638416051865,
"min": 0.10010322181202069,
"max": 0.2699546430040808,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7536655366420746,
"min": 0.40041288724808277,
"max": 1.349773215020404,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.34090909090909,
"min": 2.909090909090909,
"max": 23.34090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1027.0,
"min": 128.0,
"max": 1271.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.34090909090909,
"min": 2.909090909090909,
"max": 23.34090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1027.0,
"min": 128.0,
"max": 1271.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687645981",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687646490"
},
"total": 508.862887901,
"count": 1,
"self": 0.7832372840000517,
"children": {
"run_training.setup": {
"total": 0.04244807899999614,
"count": 1,
"self": 0.04244807899999614
},
"TrainerController.start_learning": {
"total": 508.037202538,
"count": 1,
"self": 0.6019847590006293,
"children": {
"TrainerController._reset_env": {
"total": 4.2573255620000054,
"count": 1,
"self": 4.2573255620000054
},
"TrainerController.advance": {
"total": 502.9353815909991,
"count": 18212,
"self": 0.31525086700185057,
"children": {
"env_step": {
"total": 502.62013072399725,
"count": 18212,
"self": 365.73464369900876,
"children": {
"SubprocessEnvManager._take_step": {
"total": 136.58206729098356,
"count": 18212,
"self": 2.1157061719724197,
"children": {
"TorchPolicy.evaluate": {
"total": 134.46636111901114,
"count": 18212,
"self": 134.46636111901114
}
}
},
"workers": {
"total": 0.30341973400493316,
"count": 18212,
"self": 0.0,
"children": {
"worker_root": {
"total": 506.0315736019952,
"count": 18212,
"is_parallel": true,
"self": 233.63258019900422,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00539523800000552,
"count": 1,
"is_parallel": true,
"self": 0.0037661539998907756,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016290840001147444,
"count": 10,
"is_parallel": true,
"self": 0.0016290840001147444
}
}
},
"UnityEnvironment.step": {
"total": 0.10885009000003265,
"count": 1,
"is_parallel": true,
"self": 0.0006934199999477642,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038361400004305324,
"count": 1,
"is_parallel": true,
"self": 0.00038361400004305324
},
"communicator.exchange": {
"total": 0.10564443700002357,
"count": 1,
"is_parallel": true,
"self": 0.10564443700002357
},
"steps_from_proto": {
"total": 0.0021286190000182614,
"count": 1,
"is_parallel": true,
"self": 0.00035623299993403634,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001772386000084225,
"count": 10,
"is_parallel": true,
"self": 0.001772386000084225
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 272.398993402991,
"count": 18211,
"is_parallel": true,
"self": 11.110881938983084,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.89361089799695,
"count": 18211,
"is_parallel": true,
"self": 5.89361089799695
},
"communicator.exchange": {
"total": 215.96966868400978,
"count": 18211,
"is_parallel": true,
"self": 215.96966868400978
},
"steps_from_proto": {
"total": 39.42483188200117,
"count": 18211,
"is_parallel": true,
"self": 7.394979664984476,
"children": {
"_process_rank_one_or_two_observation": {
"total": 32.029852217016696,
"count": 182110,
"is_parallel": true,
"self": 32.029852217016696
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.353600012218521e-05,
"count": 1,
"self": 9.353600012218521e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 498.93462695100345,
"count": 479104,
"is_parallel": true,
"self": 10.907167836944723,
"children": {
"process_trajectory": {
"total": 277.7545052370589,
"count": 479104,
"is_parallel": true,
"self": 274.78935320005894,
"children": {
"RLTrainer._checkpoint": {
"total": 2.965152036999939,
"count": 4,
"is_parallel": true,
"self": 2.965152036999939
}
}
},
"_update_policy": {
"total": 210.27295387699985,
"count": 90,
"is_parallel": true,
"self": 78.13002947500104,
"children": {
"TorchPPOOptimizer.update": {
"total": 132.1429244019988,
"count": 4587,
"is_parallel": true,
"self": 132.1429244019988
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.24241709000011724,
"count": 1,
"self": 0.0011288390001027437,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2412882510000145,
"count": 1,
"self": 0.2412882510000145
}
}
}
}
}
}
}
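
Below is a minimal sketch (not part of the original run logs) showing one way to summarize the "gauges" block above with plain Python. The filename "timers.json" is an assumption about where this file is saved locally; the field layout (value/min/max/count per gauge) is taken directly from the data shown.

# Minimal sketch, assuming the JSON above is saved locally as "timers.json"
# (the path is an assumption, not taken from the log itself).
import json

with open("timers.json") as f:
    timers = json.load(f)

# Each gauge appears to record the most recent value plus the min/max observed
# across "count" reporting intervals (20 here, for a ~200k-step run).
for name, gauge in timers["gauges"].items():
    print(f"{name}: last={gauge['value']:.4g} "
          f"min={gauge['min']:.4g} max={gauge['max']:.4g} n={gauge['count']}")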