{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.5801627039909363, "min": 0.5565173625946045, "max": 1.5031931400299072, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 17442.01171875, "min": 16677.712890625, "max": 45600.8671875, "count": 33 }, "Pyramids.Step.mean": { "value": 989930.0, "min": 29905.0, "max": 989930.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989930.0, "min": 29905.0, "max": 989930.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.04801265150308609, "min": -0.10678725689649582, "max": 0.04801265150308609, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 11.811112403869629, "min": -25.628942489624023, "max": 11.811112403869629, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.024382250383496284, "min": 0.005832829978317022, "max": 0.31173238158226013, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 5.99803352355957, "min": 1.4115447998046875, "max": 73.88057708740234, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06795171411976698, "min": 0.06489726954232054, "max": 0.07188028846235002, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9513239976767378, "min": 0.4749443289708753, "max": 1.0782043269352504, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.007244281295608072, "min": 9.57479069882561e-05, "max": 0.012981635506771148, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.10141993813851301, "min": 0.0013404706978355854, "max": 0.10990405614713347, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.556968909614285e-06, "min": 7.556968909614285e-06, "max": 0.0002952349301597857, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00010579756473459999, "min": 0.00010579756473459999, "max": 0.0035091020302994, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10251895714285718, "min": 0.10251895714285718, "max": 0.19841164285714283, "count": 33 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4352654000000005, "min": 1.3888814999999999, "max": 2.5697006, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.0002616438185714286, "min": 0.0002616438185714286, "max": 0.009841323121428571, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.003663013460000001, "min": 0.003663013460000001, "max": 0.11699308993999999, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.013510710559785366, "min": 0.013510710559785366, "max": 0.46808338165283203, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.18914994597434998, "min": 0.18914994597434998, "max": 3.276583671569824, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 859.2285714285714, "min": 825.8421052631579, "max": 999.0, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 30073.0, "min": 16704.0, "max": 32724.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 0.08571171913953389, "min": -0.9997586733822165, "max": 0.14880536698006294, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 2.914198450744152, "min": -30.62580170482397, "max": 5.505798578262329, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 0.08571171913953389, "min": -0.9997586733822165, "max": 0.14880536698006294, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 2.914198450744152, "min": -30.62580170482397, "max": 5.505798578262329, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 0.12071712681106017, 
"min": 0.12071712681106017, "max": 9.37431899501997, "count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 4.104382311576046, "min": 4.104382311576046, "max": 159.36342291533947, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1703635370", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.1.2+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1703637554" }, "total": 2183.57256102, "count": 1, "self": 0.48133886300001905, "children": { "run_training.setup": { "total": 0.04678671600004236, "count": 1, "self": 0.04678671600004236 }, "TrainerController.start_learning": { "total": 2183.044435441, "count": 1, "self": 1.5479548350058394, "children": { "TrainerController._reset_env": { "total": 2.0301213689999713, "count": 1, "self": 2.0301213689999713 }, "TrainerController.advance": { "total": 2179.3788471899943, "count": 63185, "self": 1.6019401369803745, "children": { "env_step": { "total": 1530.466603358041, "count": 63185, "self": 1386.4894205390121, "children": { "SubprocessEnvManager._take_step": { "total": 143.01868872901127, "count": 63185, "self": 5.210105619051092, "children": { "TorchPolicy.evaluate": { "total": 137.80858310996018, "count": 62565, "self": 137.80858310996018 } } }, "workers": { "total": 0.9584940900176662, "count": 63185, "self": 0.0, "children": { "worker_root": { "total": 2177.4639629529784, "count": 63185, "is_parallel": true, "self": 921.46010739093, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0017445690000386094, "count": 1, "is_parallel": true, "self": 0.0005478530003983906, "children": { "_process_rank_one_or_two_observation": { "total": 0.0011967159996402188, "count": 8, "is_parallel": true, "self": 0.0011967159996402188 } } }, "UnityEnvironment.step": { "total": 0.054926417000160654, "count": 1, "is_parallel": true, "self": 0.0006076330002997565, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0005188380000618054, "count": 1, "is_parallel": true, "self": 0.0005188380000618054 }, "communicator.exchange": { "total": 0.05207133899989458, "count": 1, "is_parallel": true, "self": 0.05207133899989458 }, "steps_from_proto": { "total": 0.001728606999904514, "count": 1, "is_parallel": true, "self": 0.0003765960004784574, "children": { "_process_rank_one_or_two_observation": { "total": 0.0013520109994260565, "count": 8, "is_parallel": true, "self": 0.0013520109994260565 } } } } } } }, "UnityEnvironment.step": { "total": 1256.0038555620483, "count": 63184, "is_parallel": true, "self": 37.04124087806076, "children": { "UnityEnvironment._generate_step_input": { "total": 26.087499939955478, "count": 63184, "is_parallel": true, "self": 26.087499939955478 }, "communicator.exchange": { "total": 1084.5280433840055, "count": 63184, "is_parallel": true, "self": 1084.5280433840055 }, "steps_from_proto": { "total": 108.34707136002658, "count": 63184, "is_parallel": true, "self": 22.483549750117163, "children": { 
"_process_rank_one_or_two_observation": { "total": 85.86352160990941, "count": 505472, "is_parallel": true, "self": 85.86352160990941 } } } } } } } } } } }, "trainer_advance": { "total": 647.3103036949726, "count": 63185, "self": 3.000553967985752, "children": { "process_trajectory": { "total": 133.39218095899014, "count": 63185, "self": 133.1816656269898, "children": { "RLTrainer._checkpoint": { "total": 0.210515332000341, "count": 2, "self": 0.210515332000341 } } }, "_update_policy": { "total": 510.9175687679967, "count": 454, "self": 303.4140822859547, "children": { "TorchPPOOptimizer.update": { "total": 207.50348648204204, "count": 22734, "self": 207.50348648204204 } } } } } } }, "trainer_threads": { "total": 1.3700000636163168e-06, "count": 1, "self": 1.3700000636163168e-06 }, "TrainerController._save_models": { "total": 0.08751067699995474, "count": 1, "self": 0.0013826780000272265, "children": { "RLTrainer._checkpoint": { "total": 0.08612799899992751, "count": 1, "self": 0.08612799899992751 } } } } } } }