{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5396291613578796,
"min": 0.5396291613578796,
"max": 1.449646234512329,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16024.828125,
"min": 16024.828125,
"max": 43976.46875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989965.0,
"min": 29952.0,
"max": 989965.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989965.0,
"min": 29952.0,
"max": 989965.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3453983664512634,
"min": -0.11043086647987366,
"max": 0.3719022274017334,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 89.45817565917969,
"min": -26.172115325927734,
"max": 98.55409240722656,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 1.5317012071609497,
"min": -0.030698606744408607,
"max": 1.5317012071609497,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 396.7106018066406,
"min": -8.135130882263184,
"max": 396.7106018066406,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06773502037787735,
"min": 0.06409167696015697,
"max": 0.07292012126142564,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9482902852902829,
"min": 0.5104408488299794,
"max": 1.0701256441922549,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.22083097674423463,
"min": 0.0004093818227983255,
"max": 0.22083097674423463,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 3.0916336744192847,
"min": 0.005321963696378232,
"max": 3.0916336744192847,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.63758316845714e-06,
"min": 7.63758316845714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010692616435839995,
"min": 0.00010692616435839995,
"max": 0.0035075624308125997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254582857142858,
"min": 0.10254582857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356416,
"min": 1.3886848,
"max": 2.5691874000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002643282742857142,
"min": 0.0002643282742857142,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003700595839999999,
"min": 0.003700595839999999,
"max": 0.11694182126,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009579457342624664,
"min": 0.009579457342624664,
"max": 0.49516361951828003,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1341124027967453,
"min": 0.1341124027967453,
"max": 3.4661452770233154,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 509.9818181818182,
"min": 461.3529411764706,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28049.0,
"min": 15984.0,
"max": 32676.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1262545203620737,
"min": -1.0000000521540642,
"max": 1.3032764412243576,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 61.943998619914055,
"min": -28.050001703202724,
"max": 88.62279800325632,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1262545203620737,
"min": -1.0000000521540642,
"max": 1.3032764412243576,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 61.943998619914055,
"min": -28.050001703202724,
"max": 88.62279800325632,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.050232053010073614,
"min": 0.04731789393768208,
"max": 10.204777055419981,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.762762915554049,
"min": 2.762762915554049,
"max": 163.2764328867197,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678490013",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678491991"
},
"total": 1977.7811619940003,
"count": 1,
"self": 0.4309009349999542,
"children": {
"run_training.setup": {
"total": 0.1051662770000803,
"count": 1,
"self": 0.1051662770000803
},
"TrainerController.start_learning": {
"total": 1977.2450947820003,
"count": 1,
"self": 1.2969493809737287,
"children": {
"TrainerController._reset_env": {
"total": 6.036467545999585,
"count": 1,
"self": 6.036467545999585
},
"TrainerController.advance": {
"total": 1969.820643755027,
"count": 63411,
"self": 1.4133736250851143,
"children": {
"env_step": {
"total": 1355.7575151389624,
"count": 63411,
"self": 1251.348787010871,
"children": {
"SubprocessEnvManager._take_step": {
"total": 103.62293205100605,
"count": 63411,
"self": 4.585116884988565,
"children": {
"TorchPolicy.evaluate": {
"total": 99.03781516601748,
"count": 62553,
"self": 99.03781516601748
}
}
},
"workers": {
"total": 0.7857960770852515,
"count": 63411,
"self": 0.0,
"children": {
"worker_root": {
"total": 1973.032132636999,
"count": 63411,
"is_parallel": true,
"self": 833.5637494340381,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00164186600022731,
"count": 1,
"is_parallel": true,
"self": 0.0005240100003902626,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011178559998370474,
"count": 8,
"is_parallel": true,
"self": 0.0011178559998370474
}
}
},
"UnityEnvironment.step": {
"total": 0.07099848899997596,
"count": 1,
"is_parallel": true,
"self": 0.0005319359993336548,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004539760002444382,
"count": 1,
"is_parallel": true,
"self": 0.0004539760002444382
},
"communicator.exchange": {
"total": 0.06844523400013713,
"count": 1,
"is_parallel": true,
"self": 0.06844523400013713
},
"steps_from_proto": {
"total": 0.0015673430002607347,
"count": 1,
"is_parallel": true,
"self": 0.0003682210003717046,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011991219998890301,
"count": 8,
"is_parallel": true,
"self": 0.0011991219998890301
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1139.468383202961,
"count": 63410,
"is_parallel": true,
"self": 30.119587097986823,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.950190377941453,
"count": 63410,
"is_parallel": true,
"self": 21.950190377941453
},
"communicator.exchange": {
"total": 999.2319205630306,
"count": 63410,
"is_parallel": true,
"self": 999.2319205630306
},
"steps_from_proto": {
"total": 88.16668516400205,
"count": 63410,
"is_parallel": true,
"self": 18.470593462025136,
"children": {
"_process_rank_one_or_two_observation": {
"total": 69.69609170197691,
"count": 507280,
"is_parallel": true,
"self": 69.69609170197691
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 612.6497549909795,
"count": 63411,
"self": 2.4179538219636925,
"children": {
"process_trajectory": {
"total": 114.74795452101625,
"count": 63411,
"self": 114.55405293301692,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19390158799933488,
"count": 2,
"self": 0.19390158799933488
}
}
},
"_update_policy": {
"total": 495.4838466479996,
"count": 450,
"self": 316.637119372047,
"children": {
"TorchPPOOptimizer.update": {
"total": 178.84672727595262,
"count": 22770,
"self": 178.84672727595262
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.010000212583691e-07,
"count": 1,
"self": 9.010000212583691e-07
},
"TrainerController._save_models": {
"total": 0.09103319899986673,
"count": 1,
"self": 0.0014228030004233005,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08961039599944343,
"count": 1,
"self": 0.08961039599944343
}
}
}
}
}
}
}