{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4129987955093384, "min": 1.4129987955093384, "max": 1.4285576343536377, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 71329.59375, "min": 68772.8359375, "max": 78169.6953125, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 119.6555023923445, "min": 92.61235955056179, "max": 422.8487394957983, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 50016.0, "min": 48867.0, "max": 50319.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999746.0, "min": 49688.0, "max": 1999746.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999746.0, "min": 49688.0, "max": 1999746.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.2854349613189697, "min": 0.10165108740329742, "max": 2.3696367740631104, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 955.311767578125, "min": 11.994828224182129, "max": 1246.6243896484375, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.5128286767519263, "min": 1.7330936201043048, "max": 3.8588886675793814, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 1468.3623868823051, "min": 204.50504717230797, "max": 1949.9008853435516, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.5128286767519263, "min": 1.7330936201043048, "max": 3.8588886675793814, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 1468.3623868823051, "min": 204.50504717230797, "max": 1949.9008853435516, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.01695822063096178, "min": 0.010819230472043272, "max": 0.020136092032043963, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.03391644126192356, "min": 0.021638460944086543, "max": 0.05567853579608102, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.04217116857568423, "min": 0.022969762422144414, "max": 0.060187627188861365, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.08434233715136846, "min": 0.04593952484428883, "max": 0.1666438510020574, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 4.299998566699996e-06, "min": 4.299998566699996e-06, "max": 0.00029532585155804997, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 8.599997133399992e-06, "min": 8.599997133399992e-06, "max": 0.0008439429186856999, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10143330000000002, "min": 0.10143330000000002, "max": 0.19844194999999998, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.20286660000000004, "min": 0.20286660000000004, "max": 0.5813143000000002, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 8.15216699999999e-05, "min": 8.15216699999999e-05, "max": 0.004922253305, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.0001630433399999998, "min": 0.0001630433399999998, "max": 0.01406758357, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1689643046", "python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --force", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", 
"end_time_seconds": "1689645483" }, "total": 2437.362532251, "count": 1, "self": 0.43919860300002256, "children": { "run_training.setup": { "total": 0.04297689999998511, "count": 1, "self": 0.04297689999998511 }, "TrainerController.start_learning": { "total": 2436.880356748, "count": 1, "self": 4.1452404299875525, "children": { "TrainerController._reset_env": { "total": 4.274714055999993, "count": 1, "self": 4.274714055999993 }, "TrainerController.advance": { "total": 2428.338778721012, "count": 231184, "self": 4.343830112994965, "children": { "env_step": { "total": 1882.4281347859123, "count": 231184, "self": 1585.0973545918687, "children": { "SubprocessEnvManager._take_step": { "total": 294.5814060229735, "count": 231184, "self": 16.801065353063905, "children": { "TorchPolicy.evaluate": { "total": 277.7803406699096, "count": 223082, "self": 277.7803406699096 } } }, "workers": { "total": 2.7493741710701443, "count": 231184, "self": 0.0, "children": { "worker_root": { "total": 2429.4339431220023, "count": 231184, "is_parallel": true, "self": 1131.0589455019556, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0009708769999861033, "count": 1, "is_parallel": true, "self": 0.0002448700000741155, "children": { "_process_rank_one_or_two_observation": { "total": 0.0007260069999119878, "count": 2, "is_parallel": true, "self": 0.0007260069999119878 } } }, "UnityEnvironment.step": { "total": 0.029511737999996512, "count": 1, "is_parallel": true, "self": 0.0002965089998951953, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00021244700008082873, "count": 1, "is_parallel": true, "self": 0.00021244700008082873 }, "communicator.exchange": { "total": 0.028244911999991018, "count": 1, "is_parallel": true, "self": 0.028244911999991018 }, "steps_from_proto": { "total": 0.0007578700000294702, "count": 1, "is_parallel": true, "self": 0.0002251660000638367, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005327039999656336, "count": 2, "is_parallel": true, "self": 0.0005327039999656336 } } } } } } }, "UnityEnvironment.step": { "total": 1298.3749976200468, "count": 231183, "is_parallel": true, "self": 39.35788677896903, "children": { "UnityEnvironment._generate_step_input": { "total": 81.43864906705585, "count": 231183, "is_parallel": true, "self": 81.43864906705585 }, "communicator.exchange": { "total": 1079.6164219789594, "count": 231183, "is_parallel": true, "self": 1079.6164219789594 }, "steps_from_proto": { "total": 97.96203979506265, "count": 231183, "is_parallel": true, "self": 34.099597987981724, "children": { "_process_rank_one_or_two_observation": { "total": 63.86244180708093, "count": 462366, "is_parallel": true, "self": 63.86244180708093 } } } } } } } } } } }, "trainer_advance": { "total": 541.5668138221048, "count": 231184, "self": 6.376008609087194, "children": { "process_trajectory": { "total": 130.47483209601955, "count": 231184, "self": 129.1804990660189, "children": { "RLTrainer._checkpoint": { "total": 1.2943330300006437, "count": 10, "self": 1.2943330300006437 } } }, "_update_policy": { "total": 404.71597311699804, "count": 96, "self": 344.41619656299883, "children": { "TorchPPOOptimizer.update": { "total": 60.29977655399921, "count": 2880, "self": 60.29977655399921 } } } } } } }, "trainer_threads": { "total": 1.021000116452342e-06, "count": 1, "self": 1.021000116452342e-06 }, "TrainerController._save_models": { "total": 0.12162252000007356, "count": 1, 
"self": 0.0020565079998959845, "children": { "RLTrainer._checkpoint": { "total": 0.11956601200017758, "count": 1, "self": 0.11956601200017758 } } } } } } }