{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4027904272079468, "min": 1.4027881622314453, "max": 1.43047034740448, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 69001.859375, "min": 68814.265625, "max": 76957.46875, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 81.30756578947368, "min": 73.10518518518519, "max": 420.4621848739496, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49435.0, "min": 48885.0, "max": 50035.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999982.0, "min": 49594.0, "max": 1999982.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999982.0, "min": 49594.0, "max": 1999982.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.4560935497283936, "min": 0.046316687017679214, "max": 2.531996965408325, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1493.304931640625, "min": 5.46536922454834, "max": 1660.152587890625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.760204971326809, "min": 1.8719434909901376, "max": 4.092774959351016, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2286.2046225667, "min": 220.88933193683624, "max": 2658.624912559986, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.760204971326809, "min": 1.8719434909901376, "max": 4.092774959351016, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2286.2046225667, "min": 220.88933193683624, "max": 2658.624912559986, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.014942816824703997, "min": 0.014190811279695481, "max": 0.02008749653529473, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.04482845047411199, "min": 0.028381622559390963, "max": 0.058644419775616075, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.060983966663479806, "min": 0.022331176263590654, "max": 0.06224406938999892, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.18295189999043943, "min": 0.04466235252718131, "max": 0.18295189999043943, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.892798702433337e-06, "min": 3.892798702433337e-06, "max": 0.00029532442655852496, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.1678396107300012e-05, "min": 1.1678396107300012e-05, "max": 0.0008440783686405499, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10129756666666667, "min": 0.10129756666666667, "max": 0.19844147500000003, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.3038927, "min": 0.2077304, "max": 0.58135945, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 7.474857666666674e-05, "min": 7.474857666666674e-05, "max": 0.0049222296025, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.0002242457300000002, "min": 0.0002242457300000002, "max": 0.014069836555000001, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1689868680", "python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1689871335" }, "total": 
2654.0609770409997, "count": 1, "self": 0.48964821199933795, "children": { "run_training.setup": { "total": 0.03518905399999994, "count": 1, "self": 0.03518905399999994 }, "TrainerController.start_learning": { "total": 2653.5361397750003, "count": 1, "self": 4.75381857894854, "children": { "TrainerController._reset_env": { "total": 5.6409195379999915, "count": 1, "self": 5.6409195379999915 }, "TrainerController.advance": { "total": 2643.006800399052, "count": 233098, "self": 5.0878042628896765, "children": { "env_step": { "total": 2053.607683585125, "count": 233098, "self": 1732.6860400662054, "children": { "SubprocessEnvManager._take_step": { "total": 317.6771005379537, "count": 233098, "self": 18.096030396013703, "children": { "TorchPolicy.evaluate": { "total": 299.58107014194, "count": 222962, "self": 299.58107014194 } } }, "workers": { "total": 3.2445429809660027, "count": 233098, "self": 0.0, "children": { "worker_root": { "total": 2645.2874720739537, "count": 233098, "is_parallel": true, "self": 1233.8233218779271, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0010565929999870605, "count": 1, "is_parallel": true, "self": 0.0003379929999596243, "children": { "_process_rank_one_or_two_observation": { "total": 0.0007186000000274362, "count": 2, "is_parallel": true, "self": 0.0007186000000274362 } } }, "UnityEnvironment.step": { "total": 0.030157670000050985, "count": 1, "is_parallel": true, "self": 0.0003349310000544392, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0002173029999994469, "count": 1, "is_parallel": true, "self": 0.0002173029999994469 }, "communicator.exchange": { "total": 0.02887318400001959, "count": 1, "is_parallel": true, "self": 0.02887318400001959 }, "steps_from_proto": { "total": 0.0007322519999775068, "count": 1, "is_parallel": true, "self": 0.00021182699993005372, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005204250000474531, "count": 2, "is_parallel": true, "self": 0.0005204250000474531 } } } } } } }, "UnityEnvironment.step": { "total": 1411.4641501960266, "count": 233097, "is_parallel": true, "self": 42.725333303001435, "children": { "UnityEnvironment._generate_step_input": { "total": 90.98628575102646, "count": 233097, "is_parallel": true, "self": 90.98628575102646 }, "communicator.exchange": { "total": 1173.2175407979785, "count": 233097, "is_parallel": true, "self": 1173.2175407979785 }, "steps_from_proto": { "total": 104.53499034402006, "count": 233097, "is_parallel": true, "self": 40.715811074134024, "children": { "_process_rank_one_or_two_observation": { "total": 63.819179269886035, "count": 466194, "is_parallel": true, "self": 63.819179269886035 } } } } } } } } } } }, "trainer_advance": { "total": 584.3113125510372, "count": 233098, "self": 7.078875267011085, "children": { "process_trajectory": { "total": 155.9958927280249, "count": 233098, "self": 154.51591637302528, "children": { "RLTrainer._checkpoint": { "total": 1.4799763549996214, "count": 10, "self": 1.4799763549996214 } } }, "_update_policy": { "total": 421.2365445560012, "count": 97, "self": 359.27599139599494, "children": { "TorchPPOOptimizer.update": { "total": 61.96055316000627, "count": 2910, "self": 61.96055316000627 } } } } } } }, "trainer_threads": { "total": 1.005999820336001e-06, "count": 1, "self": 1.005999820336001e-06 }, "TrainerController._save_models": { "total": 0.13460025299991685, "count": 1, "self": 0.003002243999617349, "children": { 
"RLTrainer._checkpoint": { "total": 0.1315980090002995, "count": 1, "self": 0.1315980090002995 } } } } } } }