{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 3.2758185863494873, "min": 3.273653268814087, "max": 3.2957606315612793, "count": 6 }, "SoccerTwos.Policy.Entropy.sum": { "value": 56501.3203125, "min": 33496.95703125, "max": 105464.265625, "count": 6 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 837.8333333333334, "min": 720.0, "max": 999.0, "count": 6 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 20108.0, "min": 14400.0, "max": 28084.0, "count": 6 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1200.970418087186, "min": 1200.3463957600693, "max": 1201.2421091626256, "count": 5 }, "SoccerTwos.Self-play.ELO.sum": { "value": 2401.940836174372, "min": 2401.940836174372, "max": 9606.343970681504, "count": 5 }, "SoccerTwos.Step.mean": { "value": 59306.0, "min": 9058.0, "max": 59306.0, "count": 6 }, "SoccerTwos.Step.sum": { "value": 59306.0, "min": 9058.0, "max": 59306.0, "count": 6 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": 0.05247111618518829, "min": 0.05247111618518829, "max": 0.10548456013202667, "count": 6 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": 0.6296533942222595, "min": 0.6296533942222595, "max": 1.0729753971099854, "count": 6 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": 0.0441579669713974, "min": 0.0441579669713974, "max": 0.06287874281406403, "count": 6 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": 0.5298956036567688, "min": 0.5298956036567688, "max": 0.817385733127594, "count": 6 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 6 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 6 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": 0.18616666396458945, "min": -0.34632727232846344, "max": 0.18616666396458945, "count": 6 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": 2.2339999675750732, "min": -3.809599995613098, "max": 2.2339999675750732, "count": 6 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": 0.18616666396458945, "min": -0.34632727232846344, "max": 0.18616666396458945, "count": 6 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": 2.2339999675750732, "min": -3.809599995613098, "max": 2.2339999675750732, "count": 6 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 6 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 6 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.015314620580223466, "min": 0.015118701877751543, "max": 0.015314620580223466, "count": 2 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.015314620580223466, "min": 0.015118701877751543, "max": 0.015314620580223466, "count": 2 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.0014895137952407821, "min": 0.0014895137952407821, "max": 0.04832433110568672, "count": 2 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.0014895137952407821, "min": 0.0014895137952407821, "max": 0.04832433110568672, "count": 2 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.0017837476402443523, "min": 0.0017837476402443523, "max": 0.03310977824342747, "count": 2 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.0017837476402443523, "min": 0.0017837476402443523, "max": 0.03310977824342747, "count": 2 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0010000000000000002, "min": 0.0010000000000000002, "max": 0.0010000000000000002, "count": 2 }, "SoccerTwos.Policy.LearningRate.sum": { "value": 
0.0010000000000000002, "min": 0.0010000000000000002, "max": 0.0010000000000000002, "count": 2 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 2 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 2 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 2 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 2 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1687619014", "python_version": "3.10.10 | packaged by conda-forge | (main, Mar 24 2023, 20:08:06) [GCC 11.3.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1687619376" }, "total": 361.4386399209998, "count": 1, "self": 0.006794630999138462, "children": { "run_training.setup": { "total": 0.027845077000165475, "count": 1, "self": 0.027845077000165475 }, "TrainerController.start_learning": { "total": 361.4040002130005, "count": 1, "self": 0.19443533102366928, "children": { "TrainerController._reset_env": { "total": 1.6044772260001992, "count": 1, "self": 1.6044772260001992 }, "TrainerController.advance": { "total": 359.3161118259768, "count": 4264, "self": 0.19566270298400923, "children": { "env_step": { "total": 160.83705588901375, "count": 4264, "self": 132.81142681304573, "children": { "SubprocessEnvManager._take_step": { "total": 27.920666818979953, "count": 4264, "self": 1.2140916279568046, "children": { "TorchPolicy.evaluate": { "total": 26.70657519102315, "count": 8482, "self": 26.70657519102315 } } }, "workers": { "total": 0.10496225698807393, "count": 4264, "self": 0.0, "children": { "worker_root": { "total": 332.1995521339868, "count": 4264, "is_parallel": true, "self": 222.28195962599693, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.010179573001551034, "count": 2, "is_parallel": true, "self": 0.005134082000950002, "children": { "_process_rank_one_or_two_observation": { "total": 0.005045491000601032, "count": 8, "is_parallel": true, "self": 0.005045491000601032 } } }, "UnityEnvironment.step": { "total": 0.09950809500060132, "count": 1, "is_parallel": true, "self": 0.0014315160005935468, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0008355190002475865, "count": 1, "is_parallel": true, "self": 0.0008355190002475865 }, "communicator.exchange": { "total": 0.0833020519994534, "count": 1, "is_parallel": true, "self": 0.0833020519994534 }, "steps_from_proto": { "total": 0.013939008000306785, "count": 2, "is_parallel": true, "self": 0.006018803998813382, "children": { "_process_rank_one_or_two_observation": { "total": 0.007920204001493403, "count": 8, "is_parallel": true, "self": 0.007920204001493403 } } } } } } }, "UnityEnvironment.step": { "total": 109.91759250798987, "count": 4263, "is_parallel": true, "self": 6.159174291911768, "children": { "UnityEnvironment._generate_step_input": { "total": 3.37915113701456, "count": 4263, 
"is_parallel": true, "self": 3.37915113701456 }, "communicator.exchange": { "total": 80.7350587789997, "count": 4263, "is_parallel": true, "self": 80.7350587789997 }, "steps_from_proto": { "total": 19.644208300063838, "count": 8526, "is_parallel": true, "self": 3.5033778640390665, "children": { "_process_rank_one_or_two_observation": { "total": 16.14083043602477, "count": 34104, "is_parallel": true, "self": 16.14083043602477 } } } } } } } } } } }, "trainer_advance": { "total": 198.28339323397904, "count": 4264, "self": 1.1564656979808206, "children": { "process_trajectory": { "total": 29.758132814999044, "count": 4264, "self": 29.758132814999044 }, "_update_policy": { "total": 167.36879472099918, "count": 3, "self": 15.231152315996042, "children": { "TorchPOCAOptimizer.update": { "total": 152.13764240500313, "count": 71, "self": 152.13764240500313 } } } } } } }, "trainer_threads": { "total": 1.3669996405951679e-06, "count": 1, "self": 1.3669996405951679e-06 }, "TrainerController._save_models": { "total": 0.2889744630001587, "count": 1, "self": 0.0025971180002670735, "children": { "RLTrainer._checkpoint": { "total": 0.28637734499989165, "count": 1, "self": 0.28637734499989165 } } } } } } }