{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4009405374526978, "min": 1.4009405374526978, "max": 1.4300428628921509, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 70491.125, "min": 68710.8046875, "max": 78672.703125, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 86.19683655536028, "min": 80.4413680781759, "max": 387.6434108527132, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49046.0, "min": 49016.0, "max": 50353.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999963.0, "min": 49518.0, "max": 1999963.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999963.0, "min": 49518.0, "max": 1999963.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.392223834991455, "min": 0.14783312380313873, "max": 2.500406503677368, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1361.1754150390625, "min": 18.922639846801758, "max": 1439.838134765625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.602500440051979, "min": 1.8791981327813119, "max": 3.9328686275836304, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2049.822750389576, "min": 240.53736099600792, "max": 2277.130935370922, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.602500440051979, "min": 1.8791981327813119, "max": 3.9328686275836304, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2049.822750389576, "min": 240.53736099600792, "max": 2277.130935370922, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.016940641839108945, "min": 0.012535270133392057, "max": 0.019697158142662374, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.05082192551732684, "min": 0.02651422555015112, "max": 0.05624104933328151, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.0536278850502438, "min": 0.02142168323819836, "max": 0.06740946099162101, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.1608836551507314, "min": 0.04284336647639672, "max": 0.19240630567073821, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.4228988590666594e-06, "min": 3.4228988590666594e-06, "max": 0.000295347676550775, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.0268696577199978e-05, "min": 1.0268696577199978e-05, "max": 0.0008437482187505997, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10114093333333335, "min": 0.10114093333333335, "max": 0.19844922500000003, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30342280000000005, "min": 0.20748989999999995, "max": 0.5812494, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.69325733333332e-05, "min": 6.69325733333332e-05, "max": 0.0049226163275000005, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00020079771999999963, "min": 0.00020079771999999963, "max": 0.014064345060000002, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1697083509", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.0.1+cu118", "numpy_version": "1.23.5", "end_time_seconds": 
"1697085869" }, "total": 2360.072349774, "count": 1, "self": 0.4272630370001025, "children": { "run_training.setup": { "total": 0.04603964799997584, "count": 1, "self": 0.04603964799997584 }, "TrainerController.start_learning": { "total": 2359.599047089, "count": 1, "self": 4.482779744987511, "children": { "TrainerController._reset_env": { "total": 7.238575672999957, "count": 1, "self": 7.238575672999957 }, "TrainerController.advance": { "total": 2347.7674632980124, "count": 232235, "self": 4.685020966970569, "children": { "env_step": { "total": 1864.71663018502, "count": 232235, "self": 1536.945246432026, "children": { "SubprocessEnvManager._take_step": { "total": 324.93035506897843, "count": 232235, "self": 17.224164888054418, "children": { "TorchPolicy.evaluate": { "total": 307.706190180924, "count": 223064, "self": 307.706190180924 } } }, "workers": { "total": 2.8410286840156687, "count": 232235, "self": 0.0, "children": { "worker_root": { "total": 2351.729741933046, "count": 232235, "is_parallel": true, "self": 1106.3261171519985, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0009223879999922246, "count": 1, "is_parallel": true, "self": 0.0003251240000281541, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005972639999640705, "count": 2, "is_parallel": true, "self": 0.0005972639999640705 } } }, "UnityEnvironment.step": { "total": 0.03199449999999615, "count": 1, "is_parallel": true, "self": 0.00034216599993897034, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00023430500004906207, "count": 1, "is_parallel": true, "self": 0.00023430500004906207 }, "communicator.exchange": { "total": 0.030570994000015617, "count": 1, "is_parallel": true, "self": 0.030570994000015617 }, "steps_from_proto": { "total": 0.000847034999992502, "count": 1, "is_parallel": true, "self": 0.0002473320000149215, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005997029999775805, "count": 2, "is_parallel": true, "self": 0.0005997029999775805 } } } } } } }, "UnityEnvironment.step": { "total": 1245.4036247810477, "count": 232234, "is_parallel": true, "self": 39.622216387158915, "children": { "UnityEnvironment._generate_step_input": { "total": 84.03421303103835, "count": 232234, "is_parallel": true, "self": 84.03421303103835 }, "communicator.exchange": { "total": 1031.4907129099295, "count": 232234, "is_parallel": true, "self": 1031.4907129099295 }, "steps_from_proto": { "total": 90.25648245292075, "count": 232234, "is_parallel": true, "self": 33.88946898185077, "children": { "_process_rank_one_or_two_observation": { "total": 56.36701347106998, "count": 464468, "is_parallel": true, "self": 56.36701347106998 } } } } } } } } } } }, "trainer_advance": { "total": 478.36581214602177, "count": 232235, "self": 6.466559857010395, "children": { "process_trajectory": { "total": 149.49874962301186, "count": 232235, "self": 148.33032135001162, "children": { "RLTrainer._checkpoint": { "total": 1.168428273000245, "count": 10, "self": 1.168428273000245 } } }, "_update_policy": { "total": 322.4005026659995, "count": 97, "self": 261.7891217080038, "children": { "TorchPPOOptimizer.update": { "total": 60.61138095799572, "count": 2910, "self": 60.61138095799572 } } } } } } }, "trainer_threads": { "total": 1.4350002857099753e-06, "count": 1, "self": 1.4350002857099753e-06 }, "TrainerController._save_models": { "total": 0.11022693799986882, "count": 1, "self": 0.0019872729999406147, 
"children": { "RLTrainer._checkpoint": { "total": 0.10823966499992821, "count": 1, "self": 0.10823966499992821 } } } } } } }