{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.1864638477563858,
"min": 0.18039709329605103,
"max": 1.4596576690673828,
"count": 66
},
"Pyramids.Policy.Entropy.sum": {
"value": 5528.2802734375,
"min": 5528.2802734375,
"max": 44280.17578125,
"count": 66
},
"Pyramids.Step.mean": {
"value": 1979960.0,
"min": 29952.0,
"max": 1979960.0,
"count": 66
},
"Pyramids.Step.sum": {
"value": 1979960.0,
"min": 29952.0,
"max": 1979960.0,
"count": 66
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.826989471912384,
"min": -0.089274100959301,
"max": 0.826989471912384,
"count": 66
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 246.44287109375,
"min": -21.515058517456055,
"max": 246.44287109375,
"count": 66
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0027297267224639654,
"min": -0.008920630440115929,
"max": 0.19711031019687653,
"count": 66
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.8134585618972778,
"min": -2.4264113903045654,
"max": 47.50358581542969,
"count": 66
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06875129233646605,
"min": 0.06307285478865565,
"max": 0.07546488655295622,
"count": 66
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9625180927105248,
"min": 0.4925582814083873,
"max": 1.0807502812919363,
"count": 66
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015489411715645387,
"min": 0.0007500600439720249,
"max": 0.01684945211209375,
"count": 66
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21685176401903541,
"min": 0.010500840615608348,
"max": 0.23589232956931247,
"count": 66
},
"Pyramids.Policy.LearningRate.mean": {
"value": 5.2519196779642875e-06,
"min": 5.2519196779642875e-06,
"max": 0.0002975753150939428,
"count": 66
},
"Pyramids.Policy.LearningRate.sum": {
"value": 7.352687549150003e-05,
"min": 7.352687549150003e-05,
"max": 0.003696399067867,
"count": 66
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10175060714285715,
"min": 0.10175060714285715,
"max": 0.19919177142857142,
"count": 66
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4245085000000002,
"min": 1.3943424,
"max": 2.6215241,
"count": 66
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0001848856535714286,
"min": 0.0001848856535714286,
"max": 0.009919257965714285,
"count": 66
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00258839915,
"min": 0.00258839915,
"max": 0.12322008669999998,
"count": 66
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008282682858407497,
"min": 0.00788664910942316,
"max": 0.3117840588092804,
"count": 66
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11595755815505981,
"min": 0.1104130819439888,
"max": 2.182488441467285,
"count": 66
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 205.09420289855072,
"min": 205.09420289855072,
"max": 999.0,
"count": 66
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28303.0,
"min": 15984.0,
"max": 32875.0,
"count": 66
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7804086732043736,
"min": -1.0000000521540642,
"max": 1.7804086732043736,
"count": 66
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 245.69639690220356,
"min": -30.581801749765873,
"max": 245.69639690220356,
"count": 66
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7804086732043736,
"min": -1.0000000521540642,
"max": 1.7804086732043736,
"count": 66
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 245.69639690220356,
"min": -30.581801749765873,
"max": 245.69639690220356,
"count": 66
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0178303539950571,
"min": 0.0178303539950571,
"max": 6.077938006259501,
"count": 66
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.4605888513178797,
"min": 2.452773006472853,
"max": 97.24700810015202,
"count": 66
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 66
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 66
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673727071",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids2 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673731297"
},
"total": 4226.010252202,
"count": 1,
"self": 0.44766875200002687,
"children": {
"run_training.setup": {
"total": 0.11084765500001481,
"count": 1,
"self": 0.11084765500001481
},
"TrainerController.start_learning": {
"total": 4225.451735795,
"count": 1,
"self": 2.5331752540687376,
"children": {
"TrainerController._reset_env": {
"total": 9.845023167999983,
"count": 1,
"self": 9.845023167999983
},
"TrainerController.advance": {
"total": 4212.983398691931,
"count": 129362,
"self": 2.5400182918338032,
"children": {
"env_step": {
"total": 2900.859631812995,
"count": 129362,
"self": 2693.850423617993,
"children": {
"SubprocessEnvManager._take_step": {
"total": 205.41543536902603,
"count": 129362,
"self": 8.252066621979282,
"children": {
"TorchPolicy.evaluate": {
"total": 197.16336874704675,
"count": 125055,
"self": 66.38677600709593,
"children": {
"TorchPolicy.sample_actions": {
"total": 130.77659273995081,
"count": 125055,
"self": 130.77659273995081
}
}
}
}
},
"workers": {
"total": 1.593772825975833,
"count": 129362,
"self": 0.0,
"children": {
"worker_root": {
"total": 4218.348804893989,
"count": 129362,
"is_parallel": true,
"self": 1722.2017308919421,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006028434999990395,
"count": 1,
"is_parallel": true,
"self": 0.0036583659999962492,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002370068999994146,
"count": 8,
"is_parallel": true,
"self": 0.002370068999994146
}
}
},
"UnityEnvironment.step": {
"total": 0.09690124799999467,
"count": 1,
"is_parallel": true,
"self": 0.001299773999988929,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043830500001718065,
"count": 1,
"is_parallel": true,
"self": 0.00043830500001718065
},
"communicator.exchange": {
"total": 0.09259949899998787,
"count": 1,
"is_parallel": true,
"self": 0.09259949899998787
},
"steps_from_proto": {
"total": 0.002563670000000684,
"count": 1,
"is_parallel": true,
"self": 0.0004910740000241276,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020725959999765564,
"count": 8,
"is_parallel": true,
"self": 0.0020725959999765564
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2496.147074002047,
"count": 129361,
"is_parallel": true,
"self": 55.516251133009064,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 43.444309489002165,
"count": 129361,
"is_parallel": true,
"self": 43.444309489002165
},
"communicator.exchange": {
"total": 2215.311333026045,
"count": 129361,
"is_parallel": true,
"self": 2215.311333026045
},
"steps_from_proto": {
"total": 181.8751803539908,
"count": 129361,
"is_parallel": true,
"self": 43.05065154167369,
"children": {
"_process_rank_one_or_two_observation": {
"total": 138.8245288123171,
"count": 1034888,
"is_parallel": true,
"self": 138.8245288123171
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1309.5837485871023,
"count": 129362,
"self": 5.0889969221204865,
"children": {
"process_trajectory": {
"total": 287.4317184629816,
"count": 129362,
"self": 287.045639336981,
"children": {
"RLTrainer._checkpoint": {
"total": 0.38607912600059535,
"count": 4,
"self": 0.38607912600059535
}
}
},
"_update_policy": {
"total": 1017.0630332020003,
"count": 924,
"self": 386.5747298649984,
"children": {
"TorchPPOOptimizer.update": {
"total": 630.4883033370019,
"count": 45600,
"self": 630.4883033370019
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.200004231184721e-07,
"count": 1,
"self": 8.200004231184721e-07
},
"TrainerController._save_models": {
"total": 0.09013786099967547,
"count": 1,
"self": 0.0014619029998357291,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08867595799983974,
"count": 1,
"self": 0.08867595799983974
}
}
}
}
}
}
}