{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.5987730622291565, "min": 0.5987730622291565, "max": 1.5046581029891968, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 17924.87109375, "min": 17924.87109375, "max": 45645.30859375, "count": 33 }, "Pyramids.Step.mean": { "value": 989889.0, "min": 29952.0, "max": 989889.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989889.0, "min": 29952.0, "max": 989889.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.5096606612205505, "min": -0.09901700168848038, "max": 0.5192990899085999, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 140.15667724609375, "min": -23.764080047607422, "max": 140.21075439453125, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": -0.05230715498328209, "min": -0.05230715498328209, "max": 0.23122337460517883, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": -14.384467124938965, "min": -14.384467124938965, "max": 55.72483444213867, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06927831231613829, "min": 0.0652915896885834, "max": 0.07308326180264843, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9698963724259361, "min": 0.49624392367235726, "max": 1.040964106615409, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.015730017961916115, "min": 0.0007061889775499739, "max": 0.015730017961916115, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.2202202514668256, "min": 0.00918045670814966, "max": 0.2202202514668256, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.360726117885712e-06, "min": 7.360726117885712e-06, "max": 0.00029515063018788575, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00010305016565039997, "min": 0.00010305016565039997, "max": 0.0036328207890597994, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10245354285714285, "min": 0.10245354285714285, "max": 0.19838354285714285, "count": 33 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4343496, "min": 1.3886848, "max": 2.6109402000000004, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.0002551089314285714, "min": 0.0002551089314285714, "max": 0.00983851593142857, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.0035715250399999997, "min": 0.0035715250399999997, "max": 0.12111292597999998, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.0069066681899130344, "min": 0.0069066681899130344, "max": 0.36278244853019714, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.09669335186481476, "min": 0.09669335186481476, "max": 2.5394771099090576, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 380.0853658536585, "min": 380.0853658536585, "max": 999.0, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 31167.0, "min": 15984.0, "max": 32682.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.6199146093028347, "min": -1.0000000521540642, "max": 1.6199146093028347, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 132.83299796283245, "min": -30.394601725041866, "max": 132.83299796283245, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.6199146093028347, "min": -1.0000000521540642, "max": 1.6199146093028347, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 132.83299796283245, "min": -30.394601725041866, "max": 132.83299796283245, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 0.0269876692511788, 
"min": 0.0269876692511788, "max": 7.449540082365274, "count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 2.2129888785966614, "min": 2.028531575706438, "max": 119.19264131784439, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1690801412", "python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids1 --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1690803624" }, "total": 2212.2151022919998, "count": 1, "self": 0.4760073699994791, "children": { "run_training.setup": { "total": 0.05280179700002918, "count": 1, "self": 0.05280179700002918 }, "TrainerController.start_learning": { "total": 2211.6862931250002, "count": 1, "self": 1.5234738619556083, "children": { "TrainerController._reset_env": { "total": 4.766926911999917, "count": 1, "self": 4.766926911999917 }, "TrainerController.advance": { "total": 2205.2997042460447, "count": 63643, "self": 1.4755558381148148, "children": { "env_step": { "total": 1528.1981147739616, "count": 63643, "self": 1410.5495244878684, "children": { "SubprocessEnvManager._take_step": { "total": 116.75769842503314, "count": 63643, "self": 5.1409938790143315, "children": { "TorchPolicy.evaluate": { "total": 111.61670454601881, "count": 62551, "self": 111.61670454601881 } } }, "workers": { "total": 0.8908918610600267, "count": 63643, "self": 0.0, "children": { "worker_root": { "total": 2206.360707123975, "count": 63643, "is_parallel": true, "self": 918.2574481479353, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0029096929999923304, "count": 1, "is_parallel": true, "self": 0.0007739909997326322, "children": { "_process_rank_one_or_two_observation": { "total": 0.0021357020002596983, "count": 8, "is_parallel": true, "self": 0.0021357020002596983 } } }, "UnityEnvironment.step": { "total": 0.05268580799997835, "count": 1, "is_parallel": true, "self": 0.0005628490000617603, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0005537550000553892, "count": 1, "is_parallel": true, "self": 0.0005537550000553892 }, "communicator.exchange": { "total": 0.04963871699987976, "count": 1, "is_parallel": true, "self": 0.04963871699987976 }, "steps_from_proto": { "total": 0.00193048699998144, "count": 1, "is_parallel": true, "self": 0.00038946299969211395, "children": { "_process_rank_one_or_two_observation": { "total": 0.001541024000289326, "count": 8, "is_parallel": true, "self": 0.001541024000289326 } } } } } } }, "UnityEnvironment.step": { "total": 1288.1032589760396, "count": 63642, "is_parallel": true, "self": 35.587703936884736, "children": { "UnityEnvironment._generate_step_input": { "total": 25.103261722093748, "count": 63642, "is_parallel": true, "self": 25.103261722093748 }, "communicator.exchange": { "total": 1117.4606786220859, "count": 63642, "is_parallel": true, "self": 1117.4606786220859 }, "steps_from_proto": { "total": 109.9516146949752, "count": 63642, "is_parallel": true, "self": 22.765514459994847, "children": { 
"_process_rank_one_or_two_observation": { "total": 87.18610023498036, "count": 509136, "is_parallel": true, "self": 87.18610023498036 } } } } } } } } } } }, "trainer_advance": { "total": 675.6260336339681, "count": 63643, "self": 2.6461427439326144, "children": { "process_trajectory": { "total": 117.80785592303482, "count": 63643, "self": 117.5895516250348, "children": { "RLTrainer._checkpoint": { "total": 0.21830429800002094, "count": 2, "self": 0.21830429800002094 } } }, "_update_policy": { "total": 555.1720349670006, "count": 452, "self": 363.6311133009815, "children": { "TorchPPOOptimizer.update": { "total": 191.54092166601913, "count": 22776, "self": 191.54092166601913 } } } } } } }, "trainer_threads": { "total": 9.509999472356867e-07, "count": 1, "self": 9.509999472356867e-07 }, "TrainerController._save_models": { "total": 0.09618715400029032, "count": 1, "self": 0.0014094330003899813, "children": { "RLTrainer._checkpoint": { "total": 0.09477772099990034, "count": 1, "self": 0.09477772099990034 } } } } } } }