{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.48431360721588135, "min": 0.48431360721588135, "max": 1.431001901626587, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 14568.1533203125, "min": 14395.1435546875, "max": 43410.875, "count": 33 }, "Pyramids.Step.mean": { "value": 989971.0, "min": 29952.0, "max": 989971.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989971.0, "min": 29952.0, "max": 989971.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.3780577778816223, "min": -0.10151124745607376, "max": 0.3780577778816223, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 100.18531036376953, "min": -24.159677505493164, "max": 100.18531036376953, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.02284999005496502, "min": 0.0050504328683018684, "max": 0.16814826428890228, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 6.0552473068237305, "min": 1.2979612350463867, "max": 40.35558319091797, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06422569570020728, "min": 0.06422569570020728, "max": 0.07357925994576708, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.8991597398029019, "min": 0.489691445077756, "max": 1.076735538109799, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.013039958523309211, "min": 8.130076319978916e-05, "max": 0.013039958523309211, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.18255941932632896, "min": 0.001056909921597259, "max": 0.18255941932632896, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.589090327478572e-06, "min": 7.589090327478572e-06, "max": 0.00029515063018788575, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.0001062472645847, "min": 0.0001062472645847, "max": 0.0031379909540031, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10252966428571429, "min": 0.10252966428571429, "max": 0.19838354285714285, "count": 33 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4354153, "min": 1.3691136000000002, "max": 2.3459969000000003, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.00026271346214285716, "min": 0.00026271346214285716, "max": 0.00983851593142857, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.00367798847, "min": 0.00367798847, "max": 0.10462509031000002, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.012695529498159885, "min": 0.012695529498159885, "max": 0.33069372177124023, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.17773741483688354, "min": 0.17773741483688354, "max": 2.3148560523986816, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 457.12857142857143, "min": 457.12857142857143, "max": 999.0, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 31999.0, "min": 15984.0, "max": 33169.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.3713914072939328, "min": -1.0000000521540642, "max": 1.3713914072939328, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 95.9973985105753, "min": -32.000001668930054, "max": 95.9973985105753, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.3713914072939328, "min": -1.0000000521540642, "max": 1.3713914072939328, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 95.9973985105753, "min": -32.000001668930054, "max": 95.9973985105753, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 0.05996217732234592, "min": 
0.05996217732234592, "max": 6.285287686623633, "count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 4.197352412564214, "min": 3.8896098221594, "max": 100.56460298597813, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1682962189", "python_version": "3.9.5 (default, Jun 4 2021, 12:28:51) \n[GCC 7.5.0]", "command_line_arguments": "/home/user/.virtualenvs/datascience/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=../training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1682964014" }, "total": 1822.2456980000006, "count": 1, "self": 0.5298074999991513, "children": { "run_training.setup": { "total": 0.038751800000682124, "count": 1, "self": 0.038751800000682124 }, "TrainerController.start_learning": { "total": 1821.6771387000008, "count": 1, "self": 1.3620082999823353, "children": { "TrainerController._reset_env": { "total": 6.587934099999984, "count": 1, "self": 6.587934099999984 }, "TrainerController.advance": { "total": 1813.546466000018, "count": 63404, "self": 1.378505200177642, "children": { "env_step": { "total": 1062.6699173000197, "count": 63404, "self": 835.9256593000064, "children": { "SubprocessEnvManager._take_step": { "total": 225.88229290008712, "count": 63404, "self": 4.877655499924003, "children": { "TorchPolicy.evaluate": { "total": 221.00463740016312, "count": 62559, "self": 221.00463740016312 } } }, "workers": { "total": 0.8619650999262376, "count": 63404, "self": 0.0, "children": { "worker_root": { "total": 1818.0819958001575, "count": 63404, "is_parallel": true, "self": 1077.533062900041, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0016719999994165846, "count": 1, "is_parallel": true, "self": 0.0005870000013601384, "children": { "_process_rank_one_or_two_observation": { "total": 0.0010849999980564462, "count": 8, "is_parallel": true, "self": 0.0010849999980564462 } } }, "UnityEnvironment.step": { "total": 0.05128659999991214, "count": 1, "is_parallel": true, "self": 0.00027570000111154513, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00025909999931172933, "count": 1, "is_parallel": true, "self": 0.00025909999931172933 }, "communicator.exchange": { "total": 0.049910199999430915, "count": 1, "is_parallel": true, "self": 0.049910199999430915 }, "steps_from_proto": { "total": 0.0008416000000579515, "count": 1, "is_parallel": true, "self": 0.00022090000038588187, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006206999996720697, "count": 8, "is_parallel": true, "self": 0.0006206999996720697 } } } } } } }, "UnityEnvironment.step": { "total": 740.5489329001166, "count": 63403, "is_parallel": true, "self": 18.71504599991931, "children": { "UnityEnvironment._generate_step_input": { "total": 14.149700600120013, "count": 63403, "is_parallel": true, "self": 14.149700600120013 }, "communicator.exchange": { "total": 653.8734275999404, "count": 63403, "is_parallel": true, "self": 653.8734275999404 }, "steps_from_proto": { "total": 53.81075870013683, "count": 63403, "is_parallel": true, "self": 
13.364155800294611, "children": { "_process_rank_one_or_two_observation": { "total": 40.44660289984222, "count": 507224, "is_parallel": true, "self": 40.44660289984222 } } } } } } } } } } }, "trainer_advance": { "total": 749.4980434998206, "count": 63404, "self": 2.4058092001096156, "children": { "process_trajectory": { "total": 128.68829909970918, "count": 63404, "self": 128.31381939970925, "children": { "RLTrainer._checkpoint": { "total": 0.374479699999938, "count": 2, "self": 0.374479699999938 } } }, "_update_policy": { "total": 618.4039352000018, "count": 435, "self": 307.70911309995336, "children": { "TorchPPOOptimizer.update": { "total": 310.69482210004844, "count": 22836, "self": 310.69482210004844 } } } } } } }, "trainer_threads": { "total": 8.000006346264854e-07, "count": 1, "self": 8.000006346264854e-07 }, "TrainerController._save_models": { "total": 0.1807294999998703, "count": 1, "self": 0.0063875999985612, "children": { "RLTrainer._checkpoint": { "total": 0.1743419000013091, "count": 1, "self": 0.1743419000013091 } } } } } } }
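The report above is the raw metrics summary that ML-Agents emits at the end of a run: a `gauges` map (each gauge carrying `value`, `min`, `max`, and `count`), the run `metadata`, and a nested wall-clock timer tree keyed by `children`. As a hedged illustration only, the sketch below shows one way to load and print such a report with the Python standard library; the file name `timers.json` is an assumption (ML-Agents normally writes a report of this shape under the run's `run_logs/` directory), not something stated in the data itself.

```python
# Minimal sketch, not part of the training run: load the report above,
# assuming it has been saved to disk as "timers.json" (assumed file name).
import json

with open("timers.json") as f:
    report = json.load(f)

# Each gauge stores the latest value together with min/max and a sample count.
for name, gauge in report["gauges"].items():
    print(f"{name}: value={gauge['value']} "
          f"(min={gauge['min']}, max={gauge['max']}, count={gauge['count']})")

# The rest of the report is a nested timer tree: every node has a wall-clock
# "total" in seconds, a call "count", and an optional "children" map.
def walk_timers(node, label, depth=0):
    print("  " * depth + f"{label}: {node['total']:.1f}s over {node['count']} call(s)")
    for child_label, child in node.get("children", {}).items():
        walk_timers(child, child_label, depth + 1)

walk_timers(report, report["name"])
```

Run against this report, the timer walk would show that almost all of the ~1822 s total sits under `TrainerController.start_learning`, split mainly between `env_step` (environment stepping and policy evaluation) and `trainer_advance` (trajectory processing and PPO updates), which is the usual way to spot whether a run is environment-bound or optimizer-bound.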