{
  "name": "root",
  "gauges": {
    "SoccerTwos.Policy.Entropy.mean": { "value": 3.1492481231689453, "min": 3.0758795738220215, "max": 3.295753240585327, "count": 177 },
    "SoccerTwos.Policy.Entropy.sum": { "value": 96039.46875, "min": 26732.84765625, "max": 118224.03125, "count": 177 },
    "SoccerTwos.Environment.EpisodeLength.mean": { "value": 397.35714285714283, "min": 397.35714285714283, "max": 999.0, "count": 177 },
    "SoccerTwos.Environment.EpisodeLength.sum": { "value": 22252.0, "min": 12136.0, "max": 29268.0, "count": 177 },
    "SoccerTwos.Self-play.ELO.mean": { "value": 1198.0363421079394, "min": 1190.1687248348308, "max": 1208.4195123584557, "count": 140 },
    "SoccerTwos.Self-play.ELO.sum": { "value": 28752.872210590544, "min": 2384.0548197655817, "max": 28752.872210590544, "count": 140 },
    "SoccerTwos.Step.mean": { "value": 1769404.0, "min": 9652.0, "max": 1769404.0, "count": 177 },
    "SoccerTwos.Step.sum": { "value": 1769404.0, "min": 9652.0, "max": 1769404.0, "count": 177 },
    "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": -0.017452320083975792, "min": -0.0393872931599617, "max": 0.08889803290367126, "count": 177 },
    "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": -0.48866498470306396, "min": -0.5908094048500061, "max": 1.600164532661438, "count": 177 },
    "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": -0.01416406873613596, "min": -0.04078492149710655, "max": 0.08889806270599365, "count": 177 },
    "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": -0.39659392833709717, "min": -0.5825746059417725, "max": 1.6001651287078857, "count": 177 },
    "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 177 },
    "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 177 },
    "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": -0.18195714269365584, "min": -0.5725999996066093, "max": 0.3800000101327896, "count": 177 },
    "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": -5.094799995422363, "min": -9.16159999370575, "max": 6.080000162124634, "count": 177 },
    "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": -0.18195714269365584, "min": -0.5725999996066093, "max": 0.3800000101327896, "count": 177 },
    "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": -5.094799995422363, "min": -9.16159999370575, "max": 6.080000162124634, "count": 177 },
    "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 177 },
    "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 177 },
    "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.013993485739532237, "min": 0.0112215274052384, "max": 0.02079936771187931, "count": 82 },
    "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.013993485739532237, "min": 0.0112215274052384, "max": 0.02079936771187931, "count": 82 },
    "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.0032672046761338907, "min": 9.119491911254348e-07, "max": 0.008497938343013326, "count": 82 },
    "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.0032672046761338907, "min": 9.119491911254348e-07, "max": 0.008497938343013326, "count": 82 },
    "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.0033054392163952193, "min": 1.2390629365199856e-06, "max": 0.008588151183600228, "count": 82 },
    "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.0033054392163952193, "min": 1.2390629365199856e-06, "max": 0.008588151183600228, "count": 82 },
    "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 82 },
    "SoccerTwos.Policy.LearningRate.sum": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 82 },
    "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 82 },
    "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 82 },
    "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 82 },
    "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 82 }
  },
  "metadata": {
    "timer_format_version": "0.1.0",
    "start_time_seconds": "1690910984",
    "python_version": "3.9.17 (main, Jun 6 2023, 14:33:55) \n[Clang 14.0.3 (clang-1403.0.22.14.1)]",
    "command_line_arguments": "/opt/homebrew/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=/Users/joaquinarias/Documents/Courses/AIvsAI/ml-agents/training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --force",
    "mlagents_version": "0.31.0.dev0",
    "mlagents_envs_version": "0.31.0.dev0",
    "communication_protocol_version": "1.5.0",
    "pytorch_version": "1.11.0",
    "numpy_version": "1.21.2",
    "end_time_seconds": "1690917113"
  },
  "total": 6128.712486542,
  "count": 1,
  "self": 0.11570175100041524,
  "children": {
    "run_training.setup": { "total": 0.02797345799999995, "count": 1, "self": 0.02797345799999995 },
    "TrainerController.start_learning": {
      "total": 6128.568811333,
      "count": 1,
      "self": 1.2377622870471896,
      "children": {
        "TrainerController._reset_env": { "total": 2.1199782109995664, "count": 9, "self": 2.1199782109995664 },
        "TrainerController.advance": {
          "total": 6125.095382209953,
          "count": 115567,
          "self": 1.0805010598187437,
          "children": {
            "env_step": {
              "total": 4953.185838634077,
              "count": 115567,
              "self": 4758.263339092997,
              "children": {
                "SubprocessEnvManager._take_step": {
                  "total": 194.12497185594665,
                  "count": 115567,
                  "self": 5.602700949108282,
                  "children": {
                    "TorchPolicy.evaluate": { "total": 188.52227090683837, "count": 229258, "self": 188.52227090683837 }
                  }
                },
                "workers": {
                  "total": 0.7975276851334598,
                  "count": 115566,
                  "self": 0.0,
                  "children": {
                    "worker_root": {
                      "total": 6124.727093669969,
                      "count": 115566,
                      "is_parallel": true,
                      "self": 1548.8416130130418,
                      "children": {
                        "steps_from_proto": {
                          "total": 0.011303708999819806,
                          "count": 18,
                          "is_parallel": true,
                          "self": 0.0016406230036134506,
                          "children": {
                            "_process_rank_one_or_two_observation": { "total": 0.009663085996206355, "count": 72, "is_parallel": true, "self": 0.009663085996206355 }
                          }
                        },
                        "UnityEnvironment.step": {
                          "total": 4575.8741769479275,
                          "count": 115566,
                          "is_parallel": true,
                          "self": 11.794880780051244,
                          "children": {
                            "UnityEnvironment._generate_step_input": { "total": 70.614689448077, "count": 115566, "is_parallel": true, "self": 70.614689448077 },
                            "communicator.exchange": { "total": 4352.519821924898, "count": 115566, "is_parallel": true, "self": 4352.519821924898 },
                            "steps_from_proto": {
                              "total": 140.9447847949015,
                              "count": 231132,
                              "is_parallel": true,
                              "self": 18.691651626925108,
                              "children": {
                                "_process_rank_one_or_two_observation": { "total": 122.25313316797639, "count": 924528, "is_parallel": true, "self": 122.25313316797639 }
                              }
                            }
                          }
                        }
                      }
                    }
                  }
                }
              }
            },
            "trainer_advance": {
              "total": 1170.8290425160567,
              "count": 115566,
              "self": 11.90267321900501,
              "children": {
                "process_trajectory": {
                  "total": 186.59435638205292,
                  "count": 115566,
                  "self": 186.2661455490525,
                  "children": {
                    "RLTrainer._checkpoint": { "total": 0.3282108330004121, "count": 3, "self": 0.3282108330004121 }
                  }
                },
                "_update_policy": {
                  "total": 972.3320129149988,
                  "count": 83,
                  "self": 150.30206223099822,
                  "children": {
                    "TorchPOCAOptimizer.update": { "total": 822.0299506840006, "count": 2490, "self": 822.0299506840006 }
                  }
                }
              }
            }
          }
        },
        "trainer_threads": { "total": 6.250002115848474e-07, "count": 1, "self": 6.250002115848474e-07 },
        "TrainerController._save_models": {
          "total": 0.11568799999986368,
          "count": 1,
          "self": 0.01969062500029395,
          "children": {
            "RLTrainer._checkpoint": { "total": 0.09599737499956973, "count": 1, "self": 0.09599737499956973 }
          }
        }
      }
    }
  }
}