{
  "name": "root",
  "gauges": {
    "SoccerTwos.Policy.Entropy.mean": { "value": 1.3997917175292969, "min": 1.3676737546920776, "max": 3.2957422733306885, "count": 5000 },
    "SoccerTwos.Policy.Entropy.sum": { "value": 27637.48828125, "min": 5772.9326171875, "max": 159787.765625, "count": 5000 },
    "SoccerTwos.Environment.EpisodeLength.mean": { "value": 56.54651162790697, "min": 40.44166666666667, "max": 999.0, "count": 5000 },
    "SoccerTwos.Environment.EpisodeLength.sum": { "value": 19452.0, "min": 3996.0, "max": 31384.0, "count": 5000 },
    "SoccerTwos.Self-play.ELO.mean": { "value": 1740.097162852889, "min": 1192.2097320158298, "max": 1859.176740605254, "count": 4934 },
    "SoccerTwos.Self-play.ELO.sum": { "value": 299296.7120106969, "min": 2384.9615961377467, "max": 444539.1368505022, "count": 4934 },
    "SoccerTwos.Step.mean": { "value": 49999964.0, "min": 9492.0, "max": 49999964.0, "count": 5000 },
    "SoccerTwos.Step.sum": { "value": 49999964.0, "min": 9492.0, "max": 49999964.0, "count": 5000 },
    "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": -0.0005859807133674622, "min": -0.1506473273038864, "max": 0.20108574628829956, "count": 5000 },
    "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": -0.10078868269920349, "min": -28.171049118041992, "max": 28.821969985961914, "count": 5000 },
    "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": -0.000606549670919776, "min": -0.15315955877304077, "max": 0.21116802096366882, "count": 5000 },
    "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": -0.10432654619216919, "min": -28.640838623046875, "max": 29.88899803161621, "count": 5000 },
    "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 5000 },
    "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 5000 },
    "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": 0.05898139365883761, "min": -0.5714285714285714, "max": 0.4941514545274012, "count": 5000 },
    "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": 10.144799709320068, "min": -68.44440007209778, "max": 67.61360049247742, "count": 5000 },
    "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": 0.05898139365883761, "min": -0.5714285714285714, "max": 0.4941514545274012, "count": 5000 },
    "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": 10.144799709320068, "min": -68.44440007209778, "max": 67.61360049247742, "count": 5000 },
    "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 5000 },
    "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 5000 },
    "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.017042746671359056, "min": 0.009699820542300586, "max": 0.027335902423753094, "count": 2418 },
    "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.017042746671359056, "min": 0.009699820542300586, "max": 0.027335902423753094, "count": 2418 },
    "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.10138025929530461, "min": 8.222247904162334e-07, "max": 0.1240983600417773, "count": 2418 },
    "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.10138025929530461, "min": 8.222247904162334e-07, "max": 0.1240983600417773, "count": 2418 },
    "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.10226851354042689, "min": 2.3433679056476344e-06, "max": 0.12695316970348358, "count": 2418 },
    "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.10226851354042689, "min": 2.3433679056476344e-06, "max": 0.12695316970348358, "count": 2418 },
    "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 2418 },
    "SoccerTwos.Policy.LearningRate.sum": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 2418 },
    "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.19999999999999996, "max": 0.20000000000000007, "count": 2418 },
    "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.19999999999999996, "max": 0.20000000000000007, "count": 2418 },
    "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005, "max": 0.005000000000000001, "count": 2418 },
    "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005, "max": 0.005000000000000001, "count": 2418 }
  },
  "metadata": {
    "timer_format_version": "0.1.0",
    "start_time_seconds": "1707573668",
    "python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
    "command_line_arguments": "/home/harry/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume",
    "mlagents_version": "1.1.0.dev0",
    "mlagents_envs_version": "1.1.0.dev0",
    "communication_protocol_version": "1.5.0",
    "pytorch_version": "2.2.0+cu121",
    "numpy_version": "1.23.5",
    "end_time_seconds": "1707679898"
  },
  "total": 106230.53303386699,
  "count": 1,
  "self": 10.004185984987998,
  "children": {
    "run_training.setup": { "total": 0.01042303800022637, "count": 1, "self": 0.01042303800022637 },
    "TrainerController.start_learning": {
      "total": 106220.518424844,
      "count": 1,
      "self": 39.50898021933972,
      "children": {
        "TrainerController._reset_env": { "total": 22.475220152002294, "count": 250, "self": 22.475220152002294 },
        "TrainerController.advance": {
          "total": 106158.37573389766,
          "count": 3436849,
          "self": 36.238270207715686,
          "children": {
            "env_step": {
              "total": 73616.25407534854,
              "count": 3436849,
              "self": 32708.251269913162,
              "children": {
                "SubprocessEnvManager._take_step": {
                  "total": 40882.19083263852,
                  "count": 3436849,
                  "self": 259.0061722970713,
                  "children": {
                    "TorchPolicy.evaluate": { "total": 40623.184660341445, "count": 6289914, "self": 40623.184660341445 }
                  }
                },
                "workers": {
                  "total": 25.811972796856026,
                  "count": 3436849,
                  "self": 0.0,
                  "children": {
                    "worker_root": {
                      "total": 106144.12933662834,
                      "count": 3436849,
                      "is_parallel": true,
                      "self": 78373.695758114,
                      "children": {
                        "run_training.setup": {
                          "total": 0.0,
                          "count": 0,
                          "is_parallel": true,
                          "self": 0.0,
                          "children": {
                            "steps_from_proto": {
                              "total": 0.002183701999456389,
                              "count": 2,
                              "is_parallel": true,
                              "self": 0.0007340239990298869,
                              "children": {
                                "_process_rank_one_or_two_observation": { "total": 0.001449678000426502, "count": 8, "is_parallel": true, "self": 0.001449678000426502 }
                              }
                            },
                            "UnityEnvironment.step": {
                              "total": 0.01823554200018407,
                              "count": 1,
                              "is_parallel": true,
                              "self": 0.0005521470002349815,
                              "children": {
                                "UnityEnvironment._generate_step_input": { "total": 0.00025119600013567833, "count": 1, "is_parallel": true, "self": 0.00025119600013567833 },
                                "communicator.exchange": { "total": 0.01631410100071662, "count": 1, "is_parallel": true, "self": 0.01631410100071662 },
                                "steps_from_proto": {
                                  "total": 0.0011180979990967899,
                                  "count": 2,
                                  "is_parallel": true,
                                  "self": 0.0002731499998844811,
                                  "children": {
                                    "_process_rank_one_or_two_observation": { "total": 0.0008449479992123088, "count": 8, "is_parallel": true, "self": 0.0008449479992123088 }
                                  }
                                }
                              }
                            }
                          }
                        },
                        "UnityEnvironment.step": {
                          "total": 27770.178873597368,
                          "count": 3436848,
                          "is_parallel": true,
                          "self": 1110.8332521675038,
                          "children": {
                            "UnityEnvironment._generate_step_input": { "total": 637.6274437751354, "count": 3436848, "is_parallel": true, "self": 637.6274437751354 },
                            "communicator.exchange": { "total": 22910.56119796649, "count": 3436848, "is_parallel": true, "self": 22910.56119796649 },
                            "steps_from_proto": {
                              "total": 3111.1569796882377,
                              "count": 6873696,
                              "is_parallel": true,
                              "self": 645.3900683012625,
                              "children": {
                                "_process_rank_one_or_two_observation": { "total": 2465.766911386975, "count": 27494784, "is_parallel": true, "self": 2465.766911386975 }
                              }
                            }
                          }
                        },
                        "steps_from_proto": {
                          "total": 0.25470491696160025,
                          "count": 498,
                          "is_parallel": true,
                          "self": 0.05424363997553883,
                          "children": {
                            "_process_rank_one_or_two_observation": { "total": 0.20046127698606142, "count": 1992, "is_parallel": true, "self": 0.20046127698606142 }
                          }
                        }
                      }
                    }
                  }
                }
              }
            },
            "trainer_advance": {
              "total": 32505.883388341408,
              "count": 3436849,
              "self": 369.9708427214682,
              "children": {
                "process_trajectory": {
                  "total": 19406.847335894854,
                  "count": 3436849,
                  "self": 19392.51958688592,
                  "children": {
                    "RLTrainer._checkpoint": { "total": 14.327749008934916, "count": 100, "self": 14.327749008934916 }
                  }
                },
                "_update_policy": {
                  "total": 12729.065209725084,
                  "count": 2418,
                  "self": 4581.71439362878,
                  "children": {
                    "TorchPOCAOptimizer.update": { "total": 8147.350816096304, "count": 72552, "self": 8147.350816096304 }
                  }
                }
              }
            }
          }
        },
        "trainer_threads": { "total": 6.210029823705554e-07, "count": 1, "self": 6.210029823705554e-07 },
        "TrainerController._save_models": {
          "total": 0.15848995400301646,
          "count": 1,
          "self": 0.0010116490011569113,
          "children": {
            "RLTrainer._checkpoint": { "total": 0.15747830500185955, "count": 1, "self": 0.15747830500185955 }
          }
        }
      }
    }
  }
}