{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.404541015625, "min": 1.404541015625, "max": 1.4286199808120728, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 70600.65625, "min": 68202.0, "max": 76549.0390625, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 88.91592128801432, "min": 85.85069444444444, "max": 380.43939393939394, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49704.0, "min": 48813.0, "max": 50218.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999829.0, "min": 49601.0, "max": 1999829.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999829.0, "min": 49601.0, "max": 1999829.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.437941074371338, "min": 0.01884124055504799, "max": 2.4604134559631348, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1360.37109375, "min": 2.468202590942383, "max": 1389.80419921875, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.7984079069134156, "min": 1.787016050051187, "max": 3.918041879458553, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2119.511612057686, "min": 234.09910255670547, "max": 2150.34184640646, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.7984079069134156, "min": 1.787016050051187, "max": 3.918041879458553, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2119.511612057686, "min": 234.09910255670547, "max": 2150.34184640646, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.01641533482761588, "min": 0.014666735183507425, "max": 0.020396181303173458, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.04924600448284763, "min": 0.02982179233841483, "max": 0.059343468939187, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.05584721287919416, "min": 0.02375201437001427, "max": 0.05584721287919416, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.16754163863758248, "min": 0.04750402874002854, "max": 0.16754163863758248, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.321598892833332e-06, "min": 3.321598892833332e-06, "max": 0.00029528070157309994, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 9.964796678499997e-06, "min": 9.964796678499997e-06, "max": 0.0008439720186759999, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10110716666666668, "min": 0.10110716666666668, "max": 0.19842690000000007, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.3033215, "min": 0.2073619, "max": 0.5813240000000001, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.524761666666667e-05, "min": 6.524761666666667e-05, "max": 0.004921502309999999, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00019574285000000002, "min": 0.00019574285000000002, "max": 0.014068067600000001, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1694905172", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1694907802" }, "total": 
2629.320578841, "count": 1, "self": 0.4370878009999615, "children": { "run_training.setup": { "total": 0.07609936799997286, "count": 1, "self": 0.07609936799997286 }, "TrainerController.start_learning": { "total": 2628.807391672, "count": 1, "self": 4.999313134025215, "children": { "TrainerController._reset_env": { "total": 4.84070589800001, "count": 1, "self": 4.84070589800001 }, "TrainerController.advance": { "total": 2618.8383356729746, "count": 231926, "self": 4.964045135033302, "children": { "env_step": { "total": 2016.4162382639436, "count": 231926, "self": 1702.3380516659217, "children": { "SubprocessEnvManager._take_step": { "total": 310.8953661199545, "count": 231926, "self": 17.961502292967452, "children": { "TorchPolicy.evaluate": { "total": 292.933863826987, "count": 222938, "self": 292.933863826987 } } }, "workers": { "total": 3.182820478067242, "count": 231926, "self": 0.0, "children": { "worker_root": { "total": 2620.8091350550376, "count": 231926, "is_parallel": true, "self": 1232.5291175739108, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0013804510000454684, "count": 1, "is_parallel": true, "self": 0.000519207000024835, "children": { "_process_rank_one_or_two_observation": { "total": 0.0008612440000206334, "count": 2, "is_parallel": true, "self": 0.0008612440000206334 } } }, "UnityEnvironment.step": { "total": 0.030859850999945593, "count": 1, "is_parallel": true, "self": 0.00035127199998896685, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0002396309999994628, "count": 1, "is_parallel": true, "self": 0.0002396309999994628 }, "communicator.exchange": { "total": 0.02946205099999588, "count": 1, "is_parallel": true, "self": 0.02946205099999588 }, "steps_from_proto": { "total": 0.0008068969999612818, "count": 1, "is_parallel": true, "self": 0.00023426200004905695, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005726349999122249, "count": 2, "is_parallel": true, "self": 0.0005726349999122249 } } } } } } }, "UnityEnvironment.step": { "total": 1388.2800174811268, "count": 231925, "is_parallel": true, "self": 42.985200280153094, "children": { "UnityEnvironment._generate_step_input": { "total": 85.71188899997048, "count": 231925, "is_parallel": true, "self": 85.71188899997048 }, "communicator.exchange": { "total": 1155.1442604320027, "count": 231925, "is_parallel": true, "self": 1155.1442604320027 }, "steps_from_proto": { "total": 104.4386677690004, "count": 231925, "is_parallel": true, "self": 37.0750142148778, "children": { "_process_rank_one_or_two_observation": { "total": 67.3636535541226, "count": 463850, "is_parallel": true, "self": 67.3636535541226 } } } } } } } } } } }, "trainer_advance": { "total": 597.4580522739976, "count": 231926, "self": 7.273692354841046, "children": { "process_trajectory": { "total": 145.99305437415603, "count": 231926, "self": 144.40893190615623, "children": { "RLTrainer._checkpoint": { "total": 1.584122467999805, "count": 10, "self": 1.584122467999805 } } }, "_update_policy": { "total": 444.19130554500055, "count": 97, "self": 381.0351562530019, "children": { "TorchPPOOptimizer.update": { "total": 63.156149291998645, "count": 2910, "self": 63.156149291998645 } } } } } } }, "trainer_threads": { "total": 9.780001164472196e-07, "count": 1, "self": 9.780001164472196e-07 }, "TrainerController._save_models": { "total": 0.12903598899993085, "count": 1, "self": 0.0024947160000010626, "children": { 
"RLTrainer._checkpoint": { "total": 0.1265412729999298, "count": 1, "self": 0.1265412729999298 } } } } } } }