{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4007946252822876, "min": 1.4007946252822876, "max": 1.4244619607925415, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 70301.6796875, "min": 68532.0078125, "max": 77373.765625, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 83.0689075630252, "min": 81.92205638474296, "max": 395.24603174603175, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49426.0, "min": 48954.0, "max": 50251.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999952.0, "min": 49486.0, "max": 1999952.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999952.0, "min": 49486.0, "max": 1999952.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.4564108848571777, "min": 0.07789400219917297, "max": 2.4598281383514404, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1461.564453125, "min": 9.736750602722168, "max": 1461.564453125, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.881533115851779, "min": 1.7949020898342132, "max": 3.898856515647801, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2309.5122039318085, "min": 224.36276122927666, "max": 2309.5122039318085, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.881533115851779, "min": 1.7949020898342132, "max": 3.898856515647801, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2309.5122039318085, "min": 224.36276122927666, "max": 2309.5122039318085, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.01685232270101551, "min": 0.013600907929746123, "max": 0.020263363708121082, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.05055696810304652, "min": 0.027201815859492245, "max": 0.05583348457391063, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.059639810563789475, "min": 0.02267863132680456, "max": 0.06209559589624405, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.17891943169136842, "min": 0.04535726265360912, "max": 0.18005836457014085, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.6841987719666676e-06, "min": 3.6841987719666676e-06, "max": 0.00029525767658077496, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.1052596315900003e-05, "min": 1.1052596315900003e-05, "max": 0.0008440605186464997, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10122803333333334, "min": 0.10122803333333334, "max": 0.19841922499999998, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.3036841, "min": 0.20760645, "max": 0.5813535000000001, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 7.127886333333333e-05, "min": 7.127886333333333e-05, "max": 0.004921119327500001, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00021383659000000002, "min": 0.00021383659000000002, "max": 0.014069539649999999, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1689134910", "python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --resume --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": 
"1689137480" }, "total": 2570.8234861, "count": 1, "self": 0.4881960010002331, "children": { "run_training.setup": { "total": 0.04019226799999842, "count": 1, "self": 0.04019226799999842 }, "TrainerController.start_learning": { "total": 2570.295097831, "count": 1, "self": 4.742990014043244, "children": { "TrainerController._reset_env": { "total": 3.9216016320000335, "count": 1, "self": 3.9216016320000335 }, "TrainerController.advance": { "total": 2561.4996905739567, "count": 232726, "self": 4.947041924950554, "children": { "env_step": { "total": 2004.7689014469688, "count": 232726, "self": 1690.2553698989836, "children": { "SubprocessEnvManager._take_step": { "total": 311.4687730379859, "count": 232726, "self": 17.412582625013442, "children": { "TorchPolicy.evaluate": { "total": 294.05619041297246, "count": 222944, "self": 294.05619041297246 } } }, "workers": { "total": 3.0447585099992693, "count": 232726, "self": 0.0, "children": { "worker_root": { "total": 2562.306909004981, "count": 232726, "is_parallel": true, "self": 1178.0328194530562, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0007608850000337952, "count": 1, "is_parallel": true, "self": 0.0002482410000084201, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005126440000253751, "count": 2, "is_parallel": true, "self": 0.0005126440000253751 } } }, "UnityEnvironment.step": { "total": 0.031300409999971635, "count": 1, "is_parallel": true, "self": 0.0003005099999882077, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00027189200000066194, "count": 1, "is_parallel": true, "self": 0.00027189200000066194 }, "communicator.exchange": { "total": 0.029935275000013917, "count": 1, "is_parallel": true, "self": 0.029935275000013917 }, "steps_from_proto": { "total": 0.0007927329999688482, "count": 1, "is_parallel": true, "self": 0.00025737699996852825, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005353560000003199, "count": 2, "is_parallel": true, "self": 0.0005353560000003199 } } } } } } }, "UnityEnvironment.step": { "total": 1384.274089551925, "count": 232725, "is_parallel": true, "self": 40.30080528282883, "children": { "UnityEnvironment._generate_step_input": { "total": 86.75832462701123, "count": 232725, "is_parallel": true, "self": 86.75832462701123 }, "communicator.exchange": { "total": 1155.4444685029512, "count": 232725, "is_parallel": true, "self": 1155.4444685029512 }, "steps_from_proto": { "total": 101.77049113913387, "count": 232725, "is_parallel": true, "self": 38.18609938728338, "children": { "_process_rank_one_or_two_observation": { "total": 63.58439175185049, "count": 465450, "is_parallel": true, "self": 63.58439175185049 } } } } } } } } } } }, "trainer_advance": { "total": 551.7837472020371, "count": 232726, "self": 7.000872762205745, "children": { "process_trajectory": { "total": 147.02330047383197, "count": 232726, "self": 145.5895045668314, "children": { "RLTrainer._checkpoint": { "total": 1.4337959070005581, "count": 10, "self": 1.4337959070005581 } } }, "_update_policy": { "total": 397.7595739659995, "count": 97, "self": 336.7156471390027, "children": { "TorchPPOOptimizer.update": { "total": 61.04392682699677, "count": 2910, "self": 61.04392682699677 } } } } } } }, "trainer_threads": { "total": 1.0759999895526562e-06, "count": 1, "self": 1.0759999895526562e-06 }, "TrainerController._save_models": { "total": 0.13081453499989948, "count": 1, "self": 
0.005544149999877845, "children": { "RLTrainer._checkpoint": { "total": 0.12527038500002163, "count": 1, "self": 0.12527038500002163 } } } } } } }