{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4066622257232666, "min": 1.4066622257232666, "max": 1.427818775177002, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 70425.953125, "min": 68235.2890625, "max": 78378.171875, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 88.33035714285714, "min": 79.9126213592233, "max": 412.8264462809917, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49465.0, "min": 49179.0, "max": 50057.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999947.0, "min": 49730.0, "max": 1999947.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999947.0, "min": 49730.0, "max": 1999947.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.5221469402313232, "min": 0.12066003680229187, "max": 2.5221469402313232, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1412.4022216796875, "min": 14.479204177856445, "max": 1494.4752197265625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.879204576675381, "min": 1.8721905663609504, "max": 3.9011061338015964, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2172.3545629382133, "min": 224.66286796331406, "max": 2372.280670762062, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.879204576675381, "min": 1.8721905663609504, "max": 3.9011061338015964, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2172.3545629382133, "min": 224.66286796331406, "max": 2372.280670762062, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.01538685063335126, "min": 0.013187940225664836, "max": 0.02075086094264407, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.04616055190005378, "min": 0.026375880451329672, "max": 0.05729427173791919, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.05478298320538468, "min": 0.02083754775424798, "max": 0.06382867706318696, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.16434894961615404, "min": 0.04167509550849596, "max": 0.1799172575275103, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.5050488316833393e-06, "min": 3.5050488316833393e-06, "max": 0.00029536102654632493, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.0515146495050018e-05, "min": 1.0515146495050018e-05, "max": 0.0008441098686300498, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10116831666666669, "min": 0.10116831666666669, "max": 0.19845367500000002, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30350495000000005, "min": 0.20749850000000003, "max": 0.58136995, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.829900166666678e-05, "min": 6.829900166666678e-05, "max": 0.004922838382500001, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00020489700500000033, "min": 0.00020489700500000033, "max": 0.014070360505, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1694700926", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": 
"1694703430" }, "total": 2504.2049436810003, "count": 1, "self": 0.43581357900029616, "children": { "run_training.setup": { "total": 0.04372563600003332, "count": 1, "self": 0.04372563600003332 }, "TrainerController.start_learning": { "total": 2503.725404466, "count": 1, "self": 4.485863849077305, "children": { "TrainerController._reset_env": { "total": 4.178164085999981, "count": 1, "self": 4.178164085999981 }, "TrainerController.advance": { "total": 2494.9153964989227, "count": 232301, "self": 4.575582361858324, "children": { "env_step": { "total": 1923.258742589085, "count": 232301, "self": 1629.9121740629937, "children": { "SubprocessEnvManager._take_step": { "total": 290.32662593605676, "count": 232301, "self": 16.611459937056566, "children": { "TorchPolicy.evaluate": { "total": 273.7151659990002, "count": 222981, "self": 273.7151659990002 } } }, "workers": { "total": 3.019942590034418, "count": 232301, "self": 0.0, "children": { "worker_root": { "total": 2496.074205594004, "count": 232301, "is_parallel": true, "self": 1161.8068197350894, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0008854599999494894, "count": 1, "is_parallel": true, "self": 0.0002637329999402027, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006217270000092867, "count": 2, "is_parallel": true, "self": 0.0006217270000092867 } } }, "UnityEnvironment.step": { "total": 0.029902837999998155, "count": 1, "is_parallel": true, "self": 0.0003664889999299703, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00021846700002470243, "count": 1, "is_parallel": true, "self": 0.00021846700002470243 }, "communicator.exchange": { "total": 0.028506590000006327, "count": 1, "is_parallel": true, "self": 0.028506590000006327 }, "steps_from_proto": { "total": 0.000811292000037156, "count": 1, "is_parallel": true, "self": 0.0002406520000022283, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005706400000349277, "count": 2, "is_parallel": true, "self": 0.0005706400000349277 } } } } } } }, "UnityEnvironment.step": { "total": 1334.2673858589146, "count": 232300, "is_parallel": true, "self": 40.79928550290242, "children": { "UnityEnvironment._generate_step_input": { "total": 82.43550674299871, "count": 232300, "is_parallel": true, "self": 82.43550674299871 }, "communicator.exchange": { "total": 1110.2262383179577, "count": 232300, "is_parallel": true, "self": 1110.2262383179577 }, "steps_from_proto": { "total": 100.80635529505565, "count": 232300, "is_parallel": true, "self": 35.65487255306954, "children": { "_process_rank_one_or_two_observation": { "total": 65.15148274198611, "count": 464600, "is_parallel": true, "self": 65.15148274198611 } } } } } } } } } } }, "trainer_advance": { "total": 567.0810715479795, "count": 232301, "self": 6.62584615112155, "children": { "process_trajectory": { "total": 142.13984919785912, "count": 232301, "self": 140.62650541985988, "children": { "RLTrainer._checkpoint": { "total": 1.5133437779992391, "count": 10, "self": 1.5133437779992391 } } }, "_update_policy": { "total": 418.31537619899876, "count": 97, "self": 357.35603478598443, "children": { "TorchPPOOptimizer.update": { "total": 60.95934141301433, "count": 2910, "self": 60.95934141301433 } } } } } } }, "trainer_threads": { "total": 1.1249999261053745e-06, "count": 1, "self": 1.1249999261053745e-06 }, "TrainerController._save_models": { "total": 0.1459789069999715, "count": 1, "self": 
0.0019767029998547514, "children": { "RLTrainer._checkpoint": { "total": 0.14400220400011676, "count": 1, "self": 0.14400220400011676 } } } } } } }