{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.5407944321632385, "min": 0.5407944321632385, "max": 1.4194328784942627, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 15929.640625, "min": 15929.640625, "max": 43059.9140625, "count": 33 }, "Pyramids.Step.mean": { "value": 989963.0, "min": 29979.0, "max": 989963.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989963.0, "min": 29979.0, "max": 989963.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.3484165370464325, "min": -0.11176275461912155, "max": 0.40881064534187317, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 89.89146423339844, "min": -26.855655670166016, "max": 107.92601013183594, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.057468585669994354, "min": 0.0051481276750564575, "max": 0.3751252293586731, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 14.826894760131836, "min": 1.3385131359100342, "max": 89.2798080444336, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.0675775079041892, "min": 0.06443519987353757, "max": 0.07235690733484391, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9460851106586489, "min": 0.5546631612751222, "max": 1.0536271957383838, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.010738555208320741, "min": 0.00011590236012941646, "max": 0.013400470810444504, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.15033977291649037, "min": 0.0016226330418118303, "max": 0.19469783356722545, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.556176052735712e-06, "min": 7.556176052735712e-06, "max": 0.0002948513642162125, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00010578646473829997, "min": 0.00010578646473829997, "max": 0.0036327178890940993, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10251869285714287, "min": 0.10251869285714287, "max": 0.1982837875, "count": 33 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4352617, "min": 1.4352617, "max": 2.6109059000000006, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.0002616174164285714, "min": 0.0002616174164285714, "max": 0.009828550371249998, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.0036626438299999994, "min": 0.0036626438299999994, "max": 0.12110949941000002, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.010805988684296608, "min": 0.010329409502446651, "max": 0.4784727692604065, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.1512838453054428, "min": 0.14461173117160797, "max": 3.827782154083252, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 538.3076923076923, "min": 453.90625, "max": 999.0, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 27992.0, "min": 16394.0, "max": 33022.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.3078038146576056, "min": -1.0000000521540642, "max": 1.4491015632474233, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 68.00579836219549, "min": -32.000001668930054, "max": 96.2421982884407, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.3078038146576056, "min": -1.0000000521540642, "max": 1.4491015632474233, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 68.00579836219549, "min": -32.000001668930054, "max": 96.2421982884407, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 0.06095652412757492, "min": 0.05056557125749011, 
"max": 9.374293010901003, "count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 3.169739254633896, "min": 3.169739254633896, "max": 159.36298118531704, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1714003124", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/home/rahil/.local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.2.2+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1714005420" }, "total": 2296.2144462290016, "count": 1, "self": 0.581255538003461, "children": { "run_training.setup": { "total": 0.02242287500121165, "count": 1, "self": 0.02242287500121165 }, "TrainerController.start_learning": { "total": 2295.610767815997, "count": 1, "self": 1.155815744496067, "children": { "TrainerController._reset_env": { "total": 4.007934984998428, "count": 1, "self": 4.007934984998428 }, "TrainerController.advance": { "total": 2290.3370513675036, "count": 63396, "self": 1.0466369310743175, "children": { "env_step": { "total": 1498.069434683057, "count": 63396, "self": 1259.7137190044887, "children": { "SubprocessEnvManager._take_step": { "total": 237.5517519243367, "count": 63396, "self": 3.6873619682737626, "children": { "TorchPolicy.evaluate": { "total": 233.86438995606295, "count": 62566, "self": 233.86438995606295 } } }, "workers": { "total": 0.8039637542315177, "count": 63396, "self": 0.0, "children": { "worker_root": { "total": 2292.6426197746478, "count": 63396, "is_parallel": true, "self": 1118.4023621678061, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.00187613600428449, "count": 1, "is_parallel": true, "self": 0.0006184490121086128, "children": { "_process_rank_one_or_two_observation": { "total": 0.001257686992175877, "count": 8, "is_parallel": true, "self": 0.001257686992175877 } } }, "UnityEnvironment.step": { "total": 0.0508186100050807, "count": 1, "is_parallel": true, "self": 0.00029684900800930336, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00038073899486334994, "count": 1, "is_parallel": true, "self": 0.00038073899486334994 }, "communicator.exchange": { "total": 0.048512134002521634, "count": 1, "is_parallel": true, "self": 0.048512134002521634 }, "steps_from_proto": { "total": 0.0016288879996864125, "count": 1, "is_parallel": true, "self": 0.0005928890022914857, "children": { "_process_rank_one_or_two_observation": { "total": 0.0010359989973949268, "count": 8, "is_parallel": true, "self": 0.0010359989973949268 } } } } } } }, "UnityEnvironment.step": { "total": 1174.2402576068416, "count": 63395, "is_parallel": true, "self": 17.405615067051258, "children": { "UnityEnvironment._generate_step_input": { "total": 12.33895807834051, "count": 63395, "is_parallel": true, "self": 12.33895807834051 }, "communicator.exchange": { "total": 1095.2886048530636, "count": 63395, "is_parallel": true, "self": 1095.2886048530636 }, "steps_from_proto": { "total": 49.207079608386266, "count": 63395, "is_parallel": true, "self": 11.143556100636488, "children": { 
"_process_rank_one_or_two_observation": { "total": 38.06352350774978, "count": 507160, "is_parallel": true, "self": 38.06352350774978 } } } } } } } } } } }, "trainer_advance": { "total": 791.2209797533724, "count": 63396, "self": 2.2256396567827323, "children": { "process_trajectory": { "total": 124.33191163056472, "count": 63396, "self": 123.80135770155903, "children": { "RLTrainer._checkpoint": { "total": 0.5305539290056913, "count": 2, "self": 0.5305539290056913 } } }, "_update_policy": { "total": 664.6634284660249, "count": 451, "self": 273.25345113658113, "children": { "TorchPPOOptimizer.update": { "total": 391.4099773294438, "count": 22776, "self": 391.4099773294438 } } } } } } }, "trainer_threads": { "total": 6.599948392249644e-07, "count": 1, "self": 6.599948392249644e-07 }, "TrainerController._save_models": { "total": 0.10996505900402553, "count": 1, "self": 0.0021994560083840042, "children": { "RLTrainer._checkpoint": { "total": 0.10776560299564153, "count": 1, "self": 0.10776560299564153 } } } } } } }