{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4033770561218262, "min": 1.4033770561218262, "max": 1.427294135093689, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 69382.9609375, "min": 68813.265625, "max": 72650.609375, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 81.12335526315789, "min": 76.19444444444444, "max": 429.45098039215685, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49323.0, "min": 43804.0, "max": 49939.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999963.0, "min": 49863.0, "max": 1999963.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999963.0, "min": 49863.0, "max": 1999963.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.4667789936065674, "min": 0.13001984357833862, "max": 2.504162073135376, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1499.8016357421875, "min": 13.132004737854004, "max": 1585.1949462890625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.7616876949997327, "min": 1.9015545968962189, "max": 4.032258599133467, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2287.1061185598373, "min": 192.0570142865181, "max": 2476.79012131691, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.7616876949997327, "min": 1.9015545968962189, "max": 4.032258599133467, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2287.1061185598373, "min": 192.0570142865181, "max": 2476.79012131691, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.014245936596368362, "min": 0.012848757598233836, "max": 0.020061773486021492, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.028491873192736723, "min": 0.025697515196467673, "max": 0.060185320458064476, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.06045801813403765, "min": 0.023103836458176374, "max": 0.06285114518056313, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.1209160362680753, "min": 0.04620767291635275, "max": 0.17569356883565584, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 4.1956736014750014e-06, "min": 4.1956736014750014e-06, "max": 0.0002943724518758499, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 8.391347202950003e-06, "min": 8.391347202950003e-06, "max": 0.0008225539758153497, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10139852500000002, "min": 0.10139852500000002, "max": 0.19812415, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.20279705000000003, "min": 0.20279705000000003, "max": 0.5741846499999999, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 7.978639750000002e-05, "min": 7.978639750000002e-05, "max": 0.004906395085, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00015957279500000005, "min": 0.00015957279500000005, "max": 0.013711814035, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1693384349", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --resume", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", 
"end_time_seconds": "1693386886" }, "total": 2537.333778018, "count": 1, "self": 0.429311848999987, "children": { "run_training.setup": { "total": 0.03817904999993971, "count": 1, "self": 0.03817904999993971 }, "TrainerController.start_learning": { "total": 2536.866287119, "count": 1, "self": 4.726108314004705, "children": { "TrainerController._reset_env": { "total": 3.693298390999985, "count": 1, "self": 3.693298390999985 }, "TrainerController.advance": { "total": 2528.3304643579954, "count": 232366, "self": 4.734137074844057, "children": { "env_step": { "total": 1972.1259194970219, "count": 232366, "self": 1666.1726032370943, "children": { "SubprocessEnvManager._take_step": { "total": 302.8115559459293, "count": 232366, "self": 16.98734517104947, "children": { "TorchPolicy.evaluate": { "total": 285.8242107748798, "count": 222250, "self": 285.8242107748798 } } }, "workers": { "total": 3.141760313998361, "count": 232366, "self": 0.0, "children": { "worker_root": { "total": 2529.0090789960987, "count": 232366, "is_parallel": true, "self": 1168.0993014959877, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0007836910000378339, "count": 1, "is_parallel": true, "self": 0.00022083999999722437, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005628510000406095, "count": 2, "is_parallel": true, "self": 0.0005628510000406095 } } }, "UnityEnvironment.step": { "total": 0.03118213100003686, "count": 1, "is_parallel": true, "self": 0.00032168000007004593, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00021443899993300874, "count": 1, "is_parallel": true, "self": 0.00021443899993300874 }, "communicator.exchange": { "total": 0.029791109000029792, "count": 1, "is_parallel": true, "self": 0.029791109000029792 }, "steps_from_proto": { "total": 0.0008549030000040148, "count": 1, "is_parallel": true, "self": 0.000244400000042333, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006105029999616818, "count": 2, "is_parallel": true, "self": 0.0006105029999616818 } } } } } } }, "UnityEnvironment.step": { "total": 1360.909777500111, "count": 232365, "is_parallel": true, "self": 40.015064612883634, "children": { "UnityEnvironment._generate_step_input": { "total": 86.50023730413955, "count": 232365, "is_parallel": true, "self": 86.50023730413955 }, "communicator.exchange": { "total": 1132.15015699703, "count": 232365, "is_parallel": true, "self": 1132.15015699703 }, "steps_from_proto": { "total": 102.24431858605783, "count": 232365, "is_parallel": true, "self": 38.79929055525167, "children": { "_process_rank_one_or_two_observation": { "total": 63.44502803080616, "count": 464730, "is_parallel": true, "self": 63.44502803080616 } } } } } } } } } } }, "trainer_advance": { "total": 551.4704077861295, "count": 232366, "self": 6.792885573101557, "children": { "process_trajectory": { "total": 148.97006250902916, "count": 232366, "self": 147.31681697302884, "children": { "RLTrainer._checkpoint": { "total": 1.6532455360003269, "count": 10, "self": 1.6532455360003269 } } }, "_update_policy": { "total": 395.7074597039988, "count": 96, "self": 336.71247884800937, "children": { "TorchPPOOptimizer.update": { "total": 58.99498085598941, "count": 2880, "self": 58.99498085598941 } } } } } } }, "trainer_threads": { "total": 1.0009998732130043e-06, "count": 1, "self": 1.0009998732130043e-06 }, "TrainerController._save_models": { "total": 0.11641505499983396, "count": 1, "self": 
0.0027704320000339067, "children": { "RLTrainer._checkpoint": { "total": 0.11364462299980005, "count": 1, "self": 0.11364462299980005 } } } } } } }
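The block above is the ML-Agents gauge and timer dump recorded for this Huggy training run. As a minimal sketch of how it might be read back (the file path below is an assumption; adjust it to wherever this JSON is stored for your run), Python's standard-library json module is enough to print the tracked gauges, the recorded command line, and the total wall-clock time:

import json

# Minimal sketch, not part of the run output: load the dump above and print
# its gauges and metadata. The path is an assumption; point it at wherever
# this JSON lives for your run (e.g. the run's run_logs directory).
with open("results/Huggy/run_logs/timers.json") as f:
    stats = json.load(f)

print("command:", stats["metadata"]["command_line_arguments"])
print("total wall-clock seconds:", stats["total"])

# Each gauge stores its latest value together with min/max and update count.
for name, gauge in stats["gauges"].items():
    print(f'{name}: value={gauge["value"]:g} min={gauge["min"]:g} '
          f'max={gauge["max"]:g} count={gauge["count"]}')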