{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4064778089523315, "min": 1.4064778089523315, "max": 1.4275355339050293, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 72453.296875, "min": 69397.7421875, "max": 75803.6171875, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 78.904, "min": 76.96567862714508, "max": 417.3, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49315.0, "min": 49136.0, "max": 50076.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999980.0, "min": 49487.0, "max": 1999980.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999980.0, "min": 49487.0, "max": 1999980.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.5192384719848633, "min": 0.11404948681592941, "max": 2.5192384719848633, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1574.5240478515625, "min": 13.57188892364502, "max": 1574.5240478515625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.9313481125831604, "min": 1.8862606362134469, "max": 3.9529250996311505, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2457.0925703644753, "min": 224.46501570940018, "max": 2491.9097706079483, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.9313481125831604, "min": 1.8862606362134469, "max": 3.9529250996311505, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2457.0925703644753, "min": 224.46501570940018, "max": 2491.9097706079483, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.018095946176941245, "min": 0.013655200205458741, "max": 0.019930623746404308, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.05428783853082374, "min": 0.02843301209844261, "max": 0.05757073855832763, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.05375185662673579, "min": 0.020920950764169298, "max": 0.06089739228288333, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.16125556988020737, "min": 0.041841901528338596, "max": 0.17525870924194653, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.7981987339666605e-06, "min": 3.7981987339666605e-06, "max": 0.000295361176546275, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.1394596201899981e-05, "min": 1.1394596201899981e-05, "max": 0.0008442678185773999, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10126603333333335, "min": 0.10126603333333335, "max": 0.19845372500000005, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30379810000000007, "min": 0.2076669, "max": 0.5814226, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 7.317506333333324e-05, "min": 7.317506333333324e-05, "max": 0.0049228408775, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.0002195251899999997, "min": 0.0002195251899999997, "max": 0.014072987739999997, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1689084543", "python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1689086945" }, "total": 
2401.599477302, "count": 1, "self": 0.7681226090003292, "children": { "run_training.setup": { "total": 0.043649719999962144, "count": 1, "self": 0.043649719999962144 }, "TrainerController.start_learning": { "total": 2400.787704973, "count": 1, "self": 4.338504134035247, "children": { "TrainerController._reset_env": { "total": 4.153107961999922, "count": 1, "self": 4.153107961999922 }, "TrainerController.advance": { "total": 2392.104774594965, "count": 233020, "self": 4.624011921001966, "children": { "env_step": { "total": 1859.4847568530183, "count": 233020, "self": 1563.0227080078341, "children": { "SubprocessEnvManager._take_step": { "total": 293.620722189102, "count": 233020, "self": 16.745428461077267, "children": { "TorchPolicy.evaluate": { "total": 276.87529372802476, "count": 223037, "self": 276.87529372802476 } } }, "workers": { "total": 2.841326656082174, "count": 233020, "self": 0.0, "children": { "worker_root": { "total": 2393.0409406119456, "count": 233020, "is_parallel": true, "self": 1120.1847433178814, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0008662420000291604, "count": 1, "is_parallel": true, "self": 0.00023489599993808952, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006313460000910709, "count": 2, "is_parallel": true, "self": 0.0006313460000910709 } } }, "UnityEnvironment.step": { "total": 0.030118292999986807, "count": 1, "is_parallel": true, "self": 0.0003578759999527392, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00021220700000412762, "count": 1, "is_parallel": true, "self": 0.00021220700000412762 }, "communicator.exchange": { "total": 0.02871097499996722, "count": 1, "is_parallel": true, "self": 0.02871097499996722 }, "steps_from_proto": { "total": 0.0008372350000627193, "count": 1, "is_parallel": true, "self": 0.0002175330000682152, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006197019999945041, "count": 2, "is_parallel": true, "self": 0.0006197019999945041 } } } } } } }, "UnityEnvironment.step": { "total": 1272.8561972940643, "count": 233019, "is_parallel": true, "self": 39.651641161894304, "children": { "UnityEnvironment._generate_step_input": { "total": 78.3536667521198, "count": 233019, "is_parallel": true, "self": 78.3536667521198 }, "communicator.exchange": { "total": 1057.7548383090666, "count": 233019, "is_parallel": true, "self": 1057.7548383090666 }, "steps_from_proto": { "total": 97.09605107098355, "count": 233019, "is_parallel": true, "self": 33.55678335004575, "children": { "_process_rank_one_or_two_observation": { "total": 63.5392677209378, "count": 466038, "is_parallel": true, "self": 63.5392677209378 } } } } } } } } } } }, "trainer_advance": { "total": 527.9960058209448, "count": 233020, "self": 6.585274006929126, "children": { "process_trajectory": { "total": 138.80061179801635, "count": 233020, "self": 137.36578242701603, "children": { "RLTrainer._checkpoint": { "total": 1.434829371000319, "count": 10, "self": 1.434829371000319 } } }, "_update_policy": { "total": 382.6101200159993, "count": 97, "self": 322.28540209899006, "children": { "TorchPPOOptimizer.update": { "total": 60.32471791700925, "count": 2910, "self": 60.32471791700925 } } } } } } }, "trainer_threads": { "total": 1.3639996723213699e-06, "count": 1, "self": 1.3639996723213699e-06 }, "TrainerController._save_models": { "total": 0.19131691800021144, "count": 1, "self": 0.0026984790001733927, "children": { 
"RLTrainer._checkpoint": { "total": 0.18861843900003805, "count": 1, "self": 0.18861843900003805 } } } } } } }