{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4078433513641357, "min": 1.4078433513641357, "max": 1.4283294677734375, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 69517.8984375, "min": 68802.9375, "max": 78679.8828125, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 77.26635514018692, "min": 73.6965620328849, "max": 392.7244094488189, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49605.0, "min": 49032.0, "max": 50011.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999504.0, "min": 49485.0, "max": 1999504.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999504.0, "min": 49485.0, "max": 1999504.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.49794864654541, "min": 0.03890785574913025, "max": 2.5023577213287354, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1603.68310546875, "min": 4.902390003204346, "max": 1628.30322265625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.9301935990465764, "min": 1.655285293384204, "max": 3.979968377847991, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2523.184290587902, "min": 208.56594696640968, "max": 2584.330558717251, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.9301935990465764, "min": 1.655285293384204, "max": 3.979968377847991, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2523.184290587902, "min": 208.56594696640968, "max": 2584.330558717251, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.015876063404721207, "min": 0.013663223802965755, "max": 0.01994380267181744, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.04762819021416362, "min": 0.02732644760593151, "max": 0.057635893192491496, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.061027938789791535, "min": 0.023688278430038026, "max": 0.061027938789791535, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.1830838163693746, "min": 0.04915596519907316, "max": 0.1830838163693746, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 4.165998611366666e-06, "min": 4.165998611366666e-06, "max": 0.00029535795154734997, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.2497995834099999e-05, "min": 1.2497995834099999e-05, "max": 0.0008441454186182001, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10138863333333331, "min": 0.10138863333333331, "max": 0.19845265, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30416589999999993, "min": 0.20790430000000004, "max": 0.5813818000000001, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 7.929280333333337e-05, "min": 7.929280333333337e-05, "max": 0.004922787235, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.0002378784100000001, "min": 0.0002378784100000001, "max": 0.01407095182, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1682883702", "python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.22.4", "end_time_seconds": "1682886053" }, 
"total": 2350.372577194, "count": 1, "self": 0.44942143699972803, "children": { "run_training.setup": { "total": 0.0658441420000031, "count": 1, "self": 0.0658441420000031 }, "TrainerController.start_learning": { "total": 2349.857311615, "count": 1, "self": 4.290291339976648, "children": { "TrainerController._reset_env": { "total": 4.807605375000037, "count": 1, "self": 4.807605375000037 }, "TrainerController.advance": { "total": 2340.6309778540235, "count": 233251, "self": 4.422248386928459, "children": { "env_step": { "total": 1811.6110023520464, "count": 233251, "self": 1524.9580970200361, "children": { "SubprocessEnvManager._take_step": { "total": 283.9562158109872, "count": 233251, "self": 16.027052866986764, "children": { "TorchPolicy.evaluate": { "total": 267.92916294400044, "count": 222905, "self": 267.92916294400044 } } }, "workers": { "total": 2.6966895210231314, "count": 233251, "self": 0.0, "children": { "worker_root": { "total": 2342.193477564002, "count": 233251, "is_parallel": true, "self": 1098.0174504110328, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0009305039999958353, "count": 1, "is_parallel": true, "self": 0.0003086899999971138, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006218139999987216, "count": 2, "is_parallel": true, "self": 0.0006218139999987216 } } }, "UnityEnvironment.step": { "total": 0.05169092599999203, "count": 1, "is_parallel": true, "self": 0.0003414839999322794, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00022016600001961706, "count": 1, "is_parallel": true, "self": 0.00022016600001961706 }, "communicator.exchange": { "total": 0.05048901800000749, "count": 1, "is_parallel": true, "self": 0.05048901800000749 }, "steps_from_proto": { "total": 0.0006402580000326452, "count": 1, "is_parallel": true, "self": 0.0001951750000444008, "children": { "_process_rank_one_or_two_observation": { "total": 0.0004450829999882444, "count": 2, "is_parallel": true, "self": 0.0004450829999882444 } } } } } } }, "UnityEnvironment.step": { "total": 1244.1760271529693, "count": 233250, "is_parallel": true, "self": 36.226143749834364, "children": { "UnityEnvironment._generate_step_input": { "total": 78.14576457894492, "count": 233250, "is_parallel": true, "self": 78.14576457894492 }, "communicator.exchange": { "total": 1042.177256126053, "count": 233250, "is_parallel": true, "self": 1042.177256126053 }, "steps_from_proto": { "total": 87.62686269813707, "count": 233250, "is_parallel": true, "self": 32.43182811327091, "children": { "_process_rank_one_or_two_observation": { "total": 55.195034584866164, "count": 466500, "is_parallel": true, "self": 55.195034584866164 } } } } } } } } } } }, "trainer_advance": { "total": 524.5977271150487, "count": 233251, "self": 6.407346224067396, "children": { "process_trajectory": { "total": 134.44092742798136, "count": 233251, "self": 133.01749157398206, "children": { "RLTrainer._checkpoint": { "total": 1.4234358539993082, "count": 10, "self": 1.4234358539993082 } } }, "_update_policy": { "total": 383.74945346299984, "count": 97, "self": 323.3731510000023, "children": { "TorchPPOOptimizer.update": { "total": 60.37630246299756, "count": 2910, "self": 60.37630246299756 } } } } } } }, "trainer_threads": { "total": 9.66999778029276e-07, "count": 1, "self": 9.66999778029276e-07 }, "TrainerController._save_models": { "total": 0.12843607900003917, "count": 1, "self": 0.0022181259996614244, "children": { 
"RLTrainer._checkpoint": { "total": 0.12621795300037775, "count": 1, "self": 0.12621795300037775 } } } } } } }