{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 3.2614316940307617, "min": 3.258707284927368, "max": 3.295718193054199, "count": 10 }, "SoccerTwos.Policy.Entropy.sum": { "value": 89650.234375, "min": 30292.70703125, "max": 105462.984375, "count": 10 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 883.1666666666666, "min": 642.625, "max": 999.0, "count": 10 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 21196.0, "min": 12328.0, "max": 27864.0, "count": 10 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1200.6875594647793, "min": 1200.6875594647793, "max": 1203.943047306674, "count": 8 }, "SoccerTwos.Self-play.ELO.sum": { "value": 7204.125356788676, "min": 2403.8360569975775, "max": 9623.964240807352, "count": 8 }, "SoccerTwos.Step.mean": { "value": 99390.0, "min": 9948.0, "max": 99390.0, "count": 10 }, "SoccerTwos.Step.sum": { "value": 99390.0, "min": 9948.0, "max": 99390.0, "count": 10 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": -0.03879609331488609, "min": -0.10664743185043335, "max": -0.03879609331488609, "count": 10 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": -0.42675700783729553, "min": -1.1396600008010864, "max": -0.42675700783729553, "count": 10 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": -0.03732283413410187, "min": -0.05642535164952278, "max": -0.03732283413410187, "count": 10 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": -0.41055116057395935, "min": -0.8185520172119141, "max": -0.40887296199798584, "count": 10 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 10 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 10 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": -0.2970909096977927, "min": -0.36251764437731576, "max": 0.17113332947095236, "count": 10 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": -3.26800000667572, "min": -6.162799954414368, "max": 2.0535999536514282, "count": 10 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": -0.2970909096977927, "min": -0.36251764437731576, "max": 0.17113332947095236, "count": 10 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": -3.26800000667572, "min": -6.162799954414368, "max": 2.0535999536514282, "count": 10 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 10 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 10 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.016862712742295116, "min": 0.01494209394052935, "max": 0.020022764914513876, "count": 4 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.016862712742295116, "min": 0.01494209394052935, "max": 0.020022764914513876, "count": 4 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.0019297421560622752, "min": 0.0008788986102445051, "max": 2.080857078152864, "count": 4 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.0019297421560622752, "min": 0.0008788986102445051, "max": 2.080857078152864, "count": 4 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.0019658890281183026, "min": 0.0013164583060036724, "max": 2.0529311431649453, "count": 4 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.0019658890281183026, "min": 0.0013164583060036724, "max": 2.0529311431649453, "count": 4 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0020000000000000005, "min": 0.0020000000000000005, "max": 0.0020000000000000005, "count": 4 }, 
"SoccerTwos.Policy.LearningRate.sum": { "value": 0.0020000000000000005, "min": 0.0020000000000000005, "max": 0.0020000000000000005, "count": 4 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 4 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 4 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 4 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 4 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1699096824", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=/content/ml-agents/training-envs-executables/linux/SoccerTwos.x86_64 --run-id=SoccerTwos01 --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.1.0+cu118", "numpy_version": "1.23.5", "end_time_seconds": "1699097055" }, "total": 231.51909745199998, "count": 1, "self": 0.433240799000032, "children": { "run_training.setup": { "total": 0.068995616000052, "count": 1, "self": 0.068995616000052 }, "TrainerController.start_learning": { "total": 231.0168610369999, "count": 1, "self": 0.151787955996042, "children": { "TrainerController._reset_env": { "total": 4.105791149999959, "count": 1, "self": 4.105791149999959 }, "TrainerController.advance": { "total": 226.75851142400393, "count": 7079, "self": 0.17550149299711393, "children": { "env_step": { "total": 185.79979378700045, "count": 7079, "self": 142.23789582400184, "children": { "SubprocessEnvManager._take_step": { "total": 43.46966568900291, "count": 7079, "self": 1.2732977140242383, "children": { "TorchPolicy.evaluate": { "total": 42.19636797497867, "count": 14072, "self": 42.19636797497867 } } }, "workers": { "total": 0.09223227399570533, "count": 7078, "self": 0.0, "children": { "worker_root": { "total": 230.68744517500602, "count": 7078, "is_parallel": true, "self": 110.80680951599982, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.004338862000054178, "count": 2, "is_parallel": true, "self": 0.0009452800001099604, "children": { "_process_rank_one_or_two_observation": { "total": 0.0033935819999442174, "count": 8, "is_parallel": true, "self": 0.0033935819999442174 } } }, "UnityEnvironment.step": { "total": 0.0403638760000149, "count": 1, "is_parallel": true, "self": 0.0011478690000785718, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0009303319999389714, "count": 1, "is_parallel": true, "self": 0.0009303319999389714 }, "communicator.exchange": { "total": 0.03501296700005696, "count": 1, "is_parallel": true, "self": 0.03501296700005696 }, "steps_from_proto": { "total": 0.003272707999940394, "count": 2, "is_parallel": true, "self": 0.0005610439999372829, "children": { "_process_rank_one_or_two_observation": { "total": 0.002711664000003111, "count": 8, "is_parallel": true, "self": 0.002711664000003111 } } } } } } }, "UnityEnvironment.step": { "total": 119.8806356590062, "count": 7077, "is_parallel": true, "self": 7.849778665020153, "children": { "UnityEnvironment._generate_step_input": { "total": 
5.165152892002652, "count": 7077, "is_parallel": true, "self": 5.165152892002652 }, "communicator.exchange": { "total": 83.09441377599637, "count": 7077, "is_parallel": true, "self": 83.09441377599637 }, "steps_from_proto": { "total": 23.771290325987025, "count": 14154, "is_parallel": true, "self": 3.8716112029813985, "children": { "_process_rank_one_or_two_observation": { "total": 19.899679123005626, "count": 56616, "is_parallel": true, "self": 19.899679123005626 } } } } } } } } } } }, "trainer_advance": { "total": 40.78321614400636, "count": 7078, "self": 1.2156955810019099, "children": { "process_trajectory": { "total": 9.969533540004363, "count": 7078, "self": 9.969533540004363 }, "_update_policy": { "total": 29.597987023000087, "count": 5, "self": 17.846319298001617, "children": { "TorchPOCAOptimizer.update": { "total": 11.75166772499847, "count": 150, "self": 11.75166772499847 } } } } } } }, "trainer_threads": { "total": 1.13999999484804e-06, "count": 1, "self": 1.13999999484804e-06 }, "TrainerController._save_models": { "total": 0.0007693669999753183, "count": 1, "self": 4.186799992567103e-05, "children": { "RLTrainer._checkpoint": { "total": 0.0007274990000496473, "count": 1, "self": 0.0007274990000496473 } } } } } } }