{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4077717065811157, "min": 1.4077717065811157, "max": 1.430090308189392, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 71379.65625, "min": 69459.03125, "max": 77832.796875, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 85.31260794473229, "min": 74.85151515151514, "max": 426.1779661016949, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49396.0, "min": 48874.0, "max": 50289.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999962.0, "min": 49987.0, "max": 1999962.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999962.0, "min": 49987.0, "max": 1999962.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.4085440635681152, "min": 0.08533310890197754, "max": 2.4759910106658936, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1394.5469970703125, "min": 9.983973503112793, "max": 1564.872802734375, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.74170178586749, "min": 1.74851012790305, "max": 4.099366782375218, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2166.4453340172768, "min": 204.57568496465683, "max": 2484.9020253419876, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.74170178586749, "min": 1.74851012790305, "max": 4.099366782375218, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2166.4453340172768, "min": 204.57568496465683, "max": 2484.9020253419876, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.014998338322271189, "min": 0.013094147973667229, "max": 0.01923316070266689, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.04499501496681357, "min": 0.026188295947334458, "max": 0.05761544112174306, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.05670261225766606, "min": 0.020814772695302963, "max": 0.06146399596085152, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.17010783677299818, "min": 0.041629545390605927, "max": 0.1716800007969141, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.509748830116664e-06, "min": 3.509748830116664e-06, "max": 0.00029538750153749994, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.0529246490349992e-05, "min": 1.0529246490349992e-05, "max": 0.0008442105185964999, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10116988333333332, "min": 0.10116988333333332, "max": 0.19846250000000004, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30350964999999996, "min": 0.20749689999999996, "max": 0.5814035, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.83771783333333e-05, "min": 6.83771783333333e-05, "max": 0.004923278749999999, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.0002051315349999999, "min": 0.0002051315349999999, "max": 0.014072034649999999, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1694354901", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --force", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": 
"1694357335" }, "total": 2433.850054669, "count": 1, "self": 0.42730162800080507, "children": { "run_training.setup": { "total": 0.03895400399960636, "count": 1, "self": 0.03895400399960636 }, "TrainerController.start_learning": { "total": 2433.383799037, "count": 1, "self": 4.34575657692767, "children": { "TrainerController._reset_env": { "total": 3.836951196999962, "count": 1, "self": 3.836951196999962 }, "TrainerController.advance": { "total": 2425.0777676430725, "count": 232676, "self": 4.414784320760191, "children": { "env_step": { "total": 1853.0544218521532, "count": 232676, "self": 1564.9912060761862, "children": { "SubprocessEnvManager._take_step": { "total": 285.1866737509981, "count": 232676, "self": 16.46966241922337, "children": { "TorchPolicy.evaluate": { "total": 268.71701133177476, "count": 223014, "self": 268.71701133177476 } } }, "workers": { "total": 2.876542024968785, "count": 232676, "self": 0.0, "children": { "worker_root": { "total": 2425.832766437204, "count": 232676, "is_parallel": true, "self": 1149.1085526563602, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0008199609997063817, "count": 1, "is_parallel": true, "self": 0.0002201329998570145, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005998279998493672, "count": 2, "is_parallel": true, "self": 0.0005998279998493672 } } }, "UnityEnvironment.step": { "total": 0.028850290999798744, "count": 1, "is_parallel": true, "self": 0.0002973679997921863, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00023758699990139576, "count": 1, "is_parallel": true, "self": 0.00023758699990139576 }, "communicator.exchange": { "total": 0.027587302000029013, "count": 1, "is_parallel": true, "self": 0.027587302000029013 }, "steps_from_proto": { "total": 0.0007280340000761498, "count": 1, "is_parallel": true, "self": 0.0002181200002269179, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005099139998492319, "count": 2, "is_parallel": true, "self": 0.0005099139998492319 } } } } } } }, "UnityEnvironment.step": { "total": 1276.724213780844, "count": 232675, "is_parallel": true, "self": 38.942182222363954, "children": { "UnityEnvironment._generate_step_input": { "total": 80.2568522401184, "count": 232675, "is_parallel": true, "self": 80.2568522401184 }, "communicator.exchange": { "total": 1060.2065792091416, "count": 232675, "is_parallel": true, "self": 1060.2065792091416 }, "steps_from_proto": { "total": 97.31860010921991, "count": 232675, "is_parallel": true, "self": 34.5907466513072, "children": { "_process_rank_one_or_two_observation": { "total": 62.727853457912715, "count": 465350, "is_parallel": true, "self": 62.727853457912715 } } } } } } } } } } }, "trainer_advance": { "total": 567.6085614701592, "count": 232676, "self": 6.509504214420758, "children": { "process_trajectory": { "total": 138.75106664173472, "count": 232676, "self": 137.4001871837354, "children": { "RLTrainer._checkpoint": { "total": 1.3508794579993264, "count": 10, "self": 1.3508794579993264 } } }, "_update_policy": { "total": 422.3479906140037, "count": 97, "self": 360.76326511400384, "children": { "TorchPPOOptimizer.update": { "total": 61.58472549999988, "count": 2910, "self": 61.58472549999988 } } } } } } }, "trainer_threads": { "total": 9.659997886046767e-07, "count": 1, "self": 9.659997886046767e-07 }, "TrainerController._save_models": { "total": 0.12332265399982134, "count": 1, "self": 
0.002564888000051724, "children": { "RLTrainer._checkpoint": { "total": 0.12075776599976962, "count": 1, "self": 0.12075776599976962 } } } } } } }