{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4045740365982056, "min": 1.4045740365982056, "max": 1.4257943630218506, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 69776.4296875, "min": 69102.75, "max": 77413.125, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 94.28952380952381, "min": 79.0224, "max": 408.0967741935484, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49502.0, "min": 48757.0, "max": 50604.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999287.0, "min": 49978.0, "max": 1999287.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999287.0, "min": 49978.0, "max": 1999287.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.3389642238616943, "min": 0.043773628771305084, "max": 2.48293399810791, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1227.9561767578125, "min": 5.384156227111816, "max": 1468.1148681640625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.6445503438086737, "min": 1.9958993220716956, "max": 4.044752264669699, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 1913.3889304995537, "min": 245.49561661481857, "max": 2384.304604768753, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.6445503438086737, "min": 1.9958993220716956, "max": 4.044752264669699, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 1913.3889304995537, "min": 245.49561661481857, "max": 2384.304604768753, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.01615026400433837, "min": 0.012370442170686146, "max": 0.01995338857717191, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.04845079201301511, "min": 0.024740884341372293, "max": 0.05986016573151573, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.04911420647468832, "min": 0.02164313479637106, "max": 0.05592957995831967, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.14734261942406496, "min": 0.04328626959274212, "max": 0.16696900961299738, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.414598861833331e-06, "min": 3.414598861833331e-06, "max": 0.0002953224765591749, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.0243796585499992e-05, "min": 1.0243796585499992e-05, "max": 0.00084385216871595, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10113816666666668, "min": 0.10113816666666668, "max": 0.19844082499999993, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30341450000000003, "min": 0.20745349999999996, "max": 0.5812840499999999, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.679451666666666e-05, "min": 6.679451666666666e-05, "max": 0.004922197167499999, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00020038354999999997, "min": 0.00020038354999999997, "max": 0.014066074094999999, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1692180850", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --force", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", 
"end_time_seconds": "1692183308" }, "total": 2457.769432139, "count": 1, "self": 0.47578943299959064, "children": { "run_training.setup": { "total": 0.04097376900006111, "count": 1, "self": 0.04097376900006111 }, "TrainerController.start_learning": { "total": 2457.252668937, "count": 1, "self": 4.437578391946772, "children": { "TrainerController._reset_env": { "total": 5.047630355000024, "count": 1, "self": 5.047630355000024 }, "TrainerController.advance": { "total": 2447.6463103370534, "count": 232657, "self": 4.564853917192977, "children": { "env_step": { "total": 1882.295947909987, "count": 232657, "self": 1590.6416479108598, "children": { "SubprocessEnvManager._take_step": { "total": 288.6011008620749, "count": 232657, "self": 17.1382244840122, "children": { "TorchPolicy.evaluate": { "total": 271.4628763780627, "count": 222936, "self": 271.4628763780627 } } }, "workers": { "total": 3.0531991370523883, "count": 232657, "self": 0.0, "children": { "worker_root": { "total": 2449.7510740139796, "count": 232657, "is_parallel": true, "self": 1150.7930673419864, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0008478490000243255, "count": 1, "is_parallel": true, "self": 0.00022168999998939398, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006261590000349315, "count": 2, "is_parallel": true, "self": 0.0006261590000349315 } } }, "UnityEnvironment.step": { "total": 0.04685446500002399, "count": 1, "is_parallel": true, "self": 0.000314822999939679, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00021483499995156308, "count": 1, "is_parallel": true, "self": 0.00021483499995156308 }, "communicator.exchange": { "total": 0.04556960800005072, "count": 1, "is_parallel": true, "self": 0.04556960800005072 }, "steps_from_proto": { "total": 0.0007551990000820297, "count": 1, "is_parallel": true, "self": 0.0002243310000267229, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005308680000553068, "count": 2, "is_parallel": true, "self": 0.0005308680000553068 } } } } } } }, "UnityEnvironment.step": { "total": 1298.9580066719932, "count": 232656, "is_parallel": true, "self": 40.03160453703367, "children": { "UnityEnvironment._generate_step_input": { "total": 82.06462574800969, "count": 232656, "is_parallel": true, "self": 82.06462574800969 }, "communicator.exchange": { "total": 1077.3165711189063, "count": 232656, "is_parallel": true, "self": 1077.3165711189063 }, "steps_from_proto": { "total": 99.54520526804356, "count": 232656, "is_parallel": true, "self": 35.13766240189375, "children": { "_process_rank_one_or_two_observation": { "total": 64.40754286614981, "count": 465312, "is_parallel": true, "self": 64.40754286614981 } } } } } } } } } } }, "trainer_advance": { "total": 560.7855085098732, "count": 232657, "self": 6.697554919865979, "children": { "process_trajectory": { "total": 142.1182167440055, "count": 232657, "self": 140.76245321900615, "children": { "RLTrainer._checkpoint": { "total": 1.355763524999361, "count": 10, "self": 1.355763524999361 } } }, "_update_policy": { "total": 411.96973684600175, "count": 97, "self": 351.8306739280023, "children": { "TorchPPOOptimizer.update": { "total": 60.13906291799947, "count": 2910, "self": 60.13906291799947 } } } } } } }, "trainer_threads": { "total": 9.750001481734216e-07, "count": 1, "self": 9.750001481734216e-07 }, "TrainerController._save_models": { "total": 0.12114887799998542, "count": 1, "self": 
0.002545881000060035, "children": { "RLTrainer._checkpoint": { "total": 0.11860299699992538, "count": 1, "self": 0.11860299699992538 } } } } } } }