{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.40421146154403687,
"min": 0.3900611400604248,
"max": 1.4220385551452637,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12229.822265625,
"min": 11832.89453125,
"max": 43138.9609375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989941.0,
"min": 29952.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989941.0,
"min": 29952.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.45397019386291504,
"min": -0.09806715697050095,
"max": 0.45397019386291504,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 122.11798095703125,
"min": -23.536117553710938,
"max": 122.11798095703125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03029509261250496,
"min": 0.012826653197407722,
"max": 0.37066441774368286,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 8.14937973022461,
"min": 3.3477563858032227,
"max": 89.70079040527344,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0646145743251379,
"min": 0.06449364745778786,
"max": 0.07379282712229808,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9046040405519306,
"min": 0.4906740198552478,
"max": 1.0362290481342844,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014095034164451395,
"min": 0.0011287499840143447,
"max": 0.014734205211476387,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19733047830231953,
"min": 0.013544999808172137,
"max": 0.21031653406994424,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.736625992585715e-06,
"min": 7.736625992585715e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010831276389620001,
"min": 0.00010831276389620001,
"max": 0.0037586017471328,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257884285714287,
"min": 0.10257884285714287,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4361038000000002,
"min": 1.3886848,
"max": 2.6528672,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026762640142857157,
"min": 0.00026762640142857157,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003746769620000002,
"min": 0.003746769620000002,
"max": 0.12530143327999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01127512939274311,
"min": 0.01127512939274311,
"max": 0.5623432397842407,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15785181522369385,
"min": 0.15785181522369385,
"max": 3.9364025592803955,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 396.8333333333333,
"min": 396.8333333333333,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28572.0,
"min": 15984.0,
"max": 32609.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4642249792814255,
"min": -1.0000000521540642,
"max": 1.4642249792814255,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 105.42419850826263,
"min": -28.59200168401003,
"max": 105.42419850826263,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4642249792814255,
"min": -1.0000000521540642,
"max": 1.4642249792814255,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 105.42419850826263,
"min": -28.59200168401003,
"max": 105.42419850826263,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04601876299622947,
"min": 0.04601876299622947,
"max": 10.807187784463167,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.313350935728522,
"min": 3.313350935728522,
"max": 172.91500455141068,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1709818948",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1709821087"
},
"total": 2139.025751825,
"count": 1,
"self": 0.48922855899945716,
"children": {
"run_training.setup": {
"total": 0.07241502400006539,
"count": 1,
"self": 0.07241502400006539
},
"TrainerController.start_learning": {
"total": 2138.4641082420003,
"count": 1,
"self": 1.4201732789338166,
"children": {
"TrainerController._reset_env": {
"total": 2.7074063380000553,
"count": 1,
"self": 2.7074063380000553
},
"TrainerController.advance": {
"total": 2134.249281587067,
"count": 63716,
"self": 1.4183746530584358,
"children": {
"env_step": {
"total": 1503.2300916709958,
"count": 63716,
"self": 1370.6750627439774,
"children": {
"SubprocessEnvManager._take_step": {
"total": 131.71663118400556,
"count": 63716,
"self": 4.750618732021394,
"children": {
"TorchPolicy.evaluate": {
"total": 126.96601245198417,
"count": 62556,
"self": 126.96601245198417
}
}
},
"workers": {
"total": 0.8383977430128198,
"count": 63716,
"self": 0.0,
"children": {
"worker_root": {
"total": 2133.407904239973,
"count": 63716,
"is_parallel": true,
"self": 883.2150442549532,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003378153000085149,
"count": 1,
"is_parallel": true,
"self": 0.0007955710000260297,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025825820000591193,
"count": 8,
"is_parallel": true,
"self": 0.0025825820000591193
}
}
},
"UnityEnvironment.step": {
"total": 0.054230354000083025,
"count": 1,
"is_parallel": true,
"self": 0.0006506530000933708,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047547899998789944,
"count": 1,
"is_parallel": true,
"self": 0.00047547899998789944
},
"communicator.exchange": {
"total": 0.051361457999973936,
"count": 1,
"is_parallel": true,
"self": 0.051361457999973936
},
"steps_from_proto": {
"total": 0.0017427640000278188,
"count": 1,
"is_parallel": true,
"self": 0.00037689500004489673,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001365868999982922,
"count": 8,
"is_parallel": true,
"self": 0.001365868999982922
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1250.1928599850198,
"count": 63715,
"is_parallel": true,
"self": 34.54860331109671,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.683440513997994,
"count": 63715,
"is_parallel": true,
"self": 23.683440513997994
},
"communicator.exchange": {
"total": 1092.4503276479088,
"count": 63715,
"is_parallel": true,
"self": 1092.4503276479088
},
"steps_from_proto": {
"total": 99.51048851201631,
"count": 63715,
"is_parallel": true,
"self": 19.92456077600309,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.58592773601322,
"count": 509720,
"is_parallel": true,
"self": 79.58592773601322
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 629.6008152630125,
"count": 63716,
"self": 2.7628627929852883,
"children": {
"process_trajectory": {
"total": 128.62663711702317,
"count": 63716,
"self": 128.42107384102292,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20556327600024815,
"count": 2,
"self": 0.20556327600024815
}
}
},
"_update_policy": {
"total": 498.211315353004,
"count": 456,
"self": 289.40749924605393,
"children": {
"TorchPPOOptimizer.update": {
"total": 208.80381610695008,
"count": 22800,
"self": 208.80381610695008
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.649997991800774e-07,
"count": 1,
"self": 9.649997991800774e-07
},
"TrainerController._save_models": {
"total": 0.08724607299973286,
"count": 1,
"self": 0.0013949629997114243,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08585111000002144,
"count": 1,
"self": 0.08585111000002144
}
}
}
}
}
}
}
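
The JSON above is the run_logs/timers.json that mlagents-learn writes at the end of a training run: "gauges" holds the last recorded value plus the running min/max/count of each logged metric, "metadata" records the run configuration and library versions, and the rest is a hierarchical wall-clock timer tree ("total" is inclusive time, "self" is time spent in the node itself, nested timers live under "children"). Below is a minimal sketch of how such a file can be inspected with Python's standard json module; the file path and the print_timers helper are illustrative assumptions, not part of the ML-Agents API.

import json

# Path is an assumption: mlagents-learn typically writes this file under
# results/<run-id>/run_logs/timers.json.
with open("run_logs/timers.json") as f:
    root = json.load(f)

# Last recorded value and running min/max of a few headline gauges.
for key in (
    "Pyramids.Environment.CumulativeReward.mean",
    "Pyramids.Policy.Entropy.mean",
    "Pyramids.Step.mean",
):
    gauge = root["gauges"][key]
    print(f"{key}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f})")

# Walk the timer tree: every node carries inclusive time ("total"),
# time spent in the node itself ("self"), and a call count.
def print_timers(name, node, depth=0):
    print(f"{'  ' * depth}{name}: total={node['total']:.1f}s "
          f"self={node['self']:.1f}s count={node['count']}")
    for child_name, child in node.get("children", {}).items():
        print_timers(child_name, child, depth + 1)

print_timers(root["name"], root)

Run against the file above, this would report a final mean cumulative reward of about 1.46 over roughly 2139 seconds of training, with communicator.exchange (the Unity environment round-trip) accounting for the bulk of the wall-clock time.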