{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 3.2864396572113037, "min": 3.2860982418060303, "max": 3.2864396572113037, "count": 2 }, "SoccerTwos.Policy.Entropy.sum": { "value": 105166.0703125, "min": 105155.140625, "max": 105166.0703125, "count": 2 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 907.75, "min": 907.75, "max": 974.375, "count": 2 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 29048.0, "min": 29048.0, "max": 31180.0, "count": 2 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1197.2664120049913, "min": 1197.2664120049913, "max": 1197.7707189672171, "count": 2 }, "SoccerTwos.Self-play.ELO.sum": { "value": 2394.5328240099825, "min": 2394.5328240099825, "max": 2395.5414379344343, "count": 2 }, "SoccerTwos.Step.mean": { "value": 49544.0, "min": 29004.0, "max": 49544.0, "count": 3 }, "SoccerTwos.Step.sum": { "value": 49544.0, "min": 29004.0, "max": 49544.0, "count": 3 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": 0.06559079885482788, "min": 0.06507007777690887, "max": 0.06603433936834335, "count": 3 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": 0.6559079885482788, "min": 0.520560622215271, "max": 0.7924121022224426, "count": 3 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": 0.06364027410745621, "min": 0.06297039985656738, "max": 0.06438735872507095, "count": 3 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": 0.6364027261734009, "min": 0.5037631988525391, "max": 0.7726483345031738, "count": 3 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 3 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 3 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": 0.0, "min": -0.16666666666666666, "max": 0.049300000071525574, "count": 3 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": 0.0, "min": -2.0, "max": 0.3944000005722046, "count": 3 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": 0.0, "min": -0.16666666666666666, "max": 0.049300000071525574, "count": 3 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": 0.0, "min": -2.0, "max": 0.3944000005722046, "count": 3 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 3 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 3 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1717293427", "python_version": "3.10.12 | packaged by Anaconda, Inc. 
| (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]", "command_line_arguments": "\\\\?\\C:\\Users\\juan.zinser\\Anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.3.0+cpu", "numpy_version": "1.23.5", "end_time_seconds": "1717293953" }, "total": 526.1551854000136, "count": 1, "self": 3.5203154000046197, "children": { "run_training.setup": { "total": 0.44083609999506734, "count": 1, "self": 0.44083609999506734 }, "TrainerController.start_learning": { "total": 522.1940339000139, "count": 1, "self": 0.08422390042687766, "children": { "TrainerController._reset_env": { "total": 443.3338799000194, "count": 1, "self": 443.3338799000194 }, "TrainerController.advance": { "total": 78.4913459995878, "count": 2003, "self": 0.09132069980842061, "children": { "env_step": { "total": 68.97023259996786, "count": 2003, "self": 52.427174900862155, "children": { "SubprocessEnvManager._take_step": { "total": 16.494033799273893, "count": 2003, "self": 0.49700410064542666, "children": { "TorchPolicy.evaluate": { "total": 15.997029698628467, "count": 4000, "self": 15.997029698628467 } } }, "workers": { "total": 0.049023899831809103, "count": 2003, "self": 0.0, "children": { "worker_root": { "total": 77.37274320068536, "count": 2003, "is_parallel": true, "self": 35.8309981013299, "children": { "steps_from_proto": { "total": 0.002807800017762929, "count": 2, "is_parallel": true, "self": 0.0006748000159859657, "children": { "_process_rank_one_or_two_observation": { "total": 0.0021330000017769635, "count": 8, "is_parallel": true, "self": 0.0021330000017769635 } } }, "UnityEnvironment.step": { "total": 41.538937299337704, "count": 2003, "is_parallel": true, "self": 2.1499764994659927, "children": { "UnityEnvironment._generate_step_input": { "total": 1.73611050072941, "count": 2003, "is_parallel": true, "self": 1.73611050072941 }, "communicator.exchange": { "total": 30.764309499616502, "count": 2003, "is_parallel": true, "self": 30.764309499616502 }, "steps_from_proto": { "total": 6.888540799525799, "count": 4006, "is_parallel": true, "self": 1.410261200624518, "children": { "_process_rank_one_or_two_observation": { "total": 5.478279598901281, "count": 16024, "is_parallel": true, "self": 5.478279598901281 } } } } } } } } } } }, "trainer_advance": { "total": 9.429792699811514, "count": 2003, "self": 0.37227439988055266, "children": { "process_trajectory": { "total": 9.057518299930962, "count": 2003, "self": 9.057518299930962 } } } } }, "trainer_threads": { "total": 1.2999807950109243e-06, "count": 1, "self": 1.2999807950109243e-06 }, "TrainerController._save_models": { "total": 0.28458279999904335, "count": 1, "self": 0.014451399998506531, "children": { "RLTrainer._checkpoint": { "total": 0.2701314000005368, "count": 1, "self": 0.2701314000005368 } } } } } } }
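For reference, a minimal sketch of how a run log like this might be loaded and summarized. The file path below is an assumption inferred from the `--run-id=SoccerTwos` argument in `command_line_arguments` (ML-Agents normally writes this log under the run's `run_logs/` directory); adjust it to wherever this JSON is stored.

```python
import json

# Assumed location of the log shown above; not stated in the file itself.
with open("results/SoccerTwos/run_logs/timers.json") as f:
    log = json.load(f)

# "gauges" holds the summary statistics (e.g. SoccerTwos.Self-play.ELO.mean);
# each entry records the latest value plus min/max/count over the run.
for name, stats in sorted(log["gauges"].items()):
    print(f"{name}: value={stats['value']:.4f} "
          f"(min={stats['min']:.4f}, max={stats['max']:.4f}, n={stats['count']})")

# The remaining keys form a hierarchical timer tree: "total" and "self" are
# seconds, and "children" nests sub-timers such as TrainerController.advance.
print("total wall-clock seconds:", log["total"])
```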