{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 2.6047139167785645, "min": 2.6047139167785645, "max": 3.295713424682617, "count": 399 }, "SoccerTwos.Policy.Entropy.sum": { "value": 33090.28515625, "min": 8448.662109375, "max": 112964.9921875, "count": 399 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 999.0, "min": 438.8, "max": 999.0, "count": 399 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 19980.0, "min": 15348.0, "max": 27820.0, "count": 399 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1195.7787662324947, "min": 1195.169598909233, "max": 1207.6894166368488, "count": 165 }, "SoccerTwos.Self-play.ELO.sum": { "value": 2391.5575324649894, "min": 2390.8669109527063, "max": 14353.131131077156, "count": 165 }, "SoccerTwos.Step.mean": { "value": 3989256.0, "min": 9926.0, "max": 3989256.0, "count": 399 }, "SoccerTwos.Step.sum": { "value": 3989256.0, "min": 9926.0, "max": 3989256.0, "count": 399 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": 1.2421657629602123e-05, "min": -0.017404751852154732, "max": 0.027386397123336792, "count": 399 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": 0.00012421657447703183, "min": -0.19520880281925201, "max": 0.33119234442710876, "count": 399 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": 1.5460480426554568e-05, "min": -0.01442845817655325, "max": 0.03021582029759884, "count": 399 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": 0.00015460480062756687, "min": -0.18756996095180511, "max": 0.356469064950943, "count": 399 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 399 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 399 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": 0.0, "min": -0.5, "max": 0.25540000200271606, "count": 399 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": 0.0, "min": -8.0, "max": 3.575600028038025, "count": 399 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": 0.0, "min": -0.5, "max": 0.25540000200271606, "count": 399 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": 0.0, "min": -8.0, "max": 3.575600028038025, "count": 399 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 399 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 399 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.014668615678480516, "min": 0.011844604785437696, "max": 0.023054146430513355, "count": 184 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.014668615678480516, "min": 0.011844604785437696, "max": 0.023054146430513355, "count": 184 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 5.138135576269557e-09, "min": 5.09807039546691e-09, "max": 0.004275869624689221, "count": 184 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 5.138135576269557e-09, "min": 5.09807039546691e-09, "max": 0.004275869624689221, "count": 184 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 8.624157477044036e-09, "min": 8.057382405723956e-09, "max": 0.00435616773708413, "count": 184 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 8.624157477044036e-09, "min": 8.057382405723956e-09, "max": 0.00435616773708413, "count": 184 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 184 }, "SoccerTwos.Policy.LearningRate.sum": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 184 }, "SoccerTwos.Policy.Epsilon.mean": 
{ "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 184 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 184 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 184 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 184 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1701284033", "python_version": "3.10.11 (tags/v3.10.11:7d4cc5a, Apr 5 2023, 00:38:17) [MSC v.1929 64 bit (AMD64)]", "command_line_arguments": "\\\\?\\D:\\Masters\\Git_repo\\RL_MultiAgent\\.venv_RL\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./SoccerTwos/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.1.1+cpu", "numpy_version": "1.23.5", "end_time_seconds": "1701296644" }, "total": 12611.553825399955, "count": 1, "self": 1.4602283000131138, "children": { "run_training.setup": { "total": 0.13341069995658472, "count": 1, "self": 0.13341069995658472 }, "TrainerController.start_learning": { "total": 12609.960186399985, "count": 1, "self": 7.191418008878827, "children": { "TrainerController._reset_env": { "total": 7.366831000079401, "count": 20, "self": 7.366831000079401 }, "TrainerController.advance": { "total": 12595.227138291055, "count": 259214, "self": 7.332780057855416, "children": { "env_step": { "total": 6708.723880513397, "count": 259214, "self": 5340.316998616443, "children": { "SubprocessEnvManager._take_step": { "total": 1363.5959701910033, "count": 259214, "self": 43.70745589106809, "children": { "TorchPolicy.evaluate": { "total": 1319.8885142999352, "count": 515210, "self": 1319.8885142999352 } } }, "workers": { "total": 4.810911705950275, "count": 259213, "self": 0.0, "children": { "worker_root": { "total": 12593.839778310678, "count": 259213, "is_parallel": true, "self": 8205.552797507844, "children": { "steps_from_proto": { "total": 0.0431424998678267, "count": 40, "is_parallel": true, "self": 0.008815500419586897, "children": { "_process_rank_one_or_two_observation": { "total": 0.0343269994482398, "count": 160, "is_parallel": true, "self": 0.0343269994482398 } } }, "UnityEnvironment.step": { "total": 4388.243838302966, "count": 259213, "is_parallel": true, "self": 220.74492688645842, "children": { "UnityEnvironment._generate_step_input": { "total": 166.84153231832897, "count": 259213, "is_parallel": true, "self": 166.84153231832897 }, "communicator.exchange": { "total": 3305.7647341149277, "count": 259213, "is_parallel": true, "self": 3305.7647341149277 }, "steps_from_proto": { "total": 694.8926449832506, "count": 518426, "is_parallel": true, "self": 129.66116266377503, "children": { "_process_rank_one_or_two_observation": { "total": 565.2314823194756, "count": 2073704, "is_parallel": true, "self": 565.2314823194756 } } } } } } } } } } }, "trainer_advance": { "total": 5879.170477719803, "count": 259213, "self": 52.236643325479236, "children": { "process_trajectory": { "total": 885.5209778941353, "count": 259213, "self": 882.8878905941965, "children": { "RLTrainer._checkpoint": { "total": 2.6330872999387793, "count": 7, "self": 2.6330872999387793 } } }, "_update_policy": { "total": 4941.412856500188, "count": 184, "self": 
620.3567591002211, "children": { "TorchPOCAOptimizer.update": { "total": 4321.056097399967, "count": 5520, "self": 4321.056097399967 } } } } } } }, "trainer_threads": { "total": 1.00000761449337e-06, "count": 1, "self": 1.00000761449337e-06 }, "TrainerController._save_models": { "total": 0.17479809996439144, "count": 1, "self": 0.010054799960926175, "children": { "RLTrainer._checkpoint": { "total": 0.16474330000346527, "count": 1, "self": 0.16474330000346527 } } } } } } }
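For reference, here is a minimal sketch of how the dump above could be inspected offline with only the Python standard library. The file path `run_logs/timers.json` and the helper name `summarize` are assumptions for illustration, not part of the ML-Agents API; the metric keys and field names (`gauges`, `total`, `count`, `self`, `children`) are taken directly from the JSON shown here.

```python
import json

# Assumed path: adjust to wherever this timer/gauge dump is stored locally.
TIMERS_PATH = "run_logs/timers.json"


def summarize(path: str) -> None:
    with open(path, "r", encoding="utf-8") as f:
        root = json.load(f)

    # "gauges" holds per-metric running statistics (value / min / max / count).
    gauges = root["gauges"]
    for key in ("SoccerTwos.Self-play.ELO.mean",
                "SoccerTwos.Environment.EpisodeLength.mean"):
        stats = gauges[key]
        print(f"{key}: value={stats['value']:.2f} "
              f"(min={stats['min']:.2f}, max={stats['max']:.2f}, n={stats['count']})")

    # The remaining keys form a timer tree: each node records total/self seconds
    # and a call count, with nested "children". Walk it to see where time went.
    def walk(name, node, depth=0):
        print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s "
              f"over {node.get('count', 0)} call(s)")
        for child_name, child in node.get("children", {}).items():
            walk(child_name, child, depth + 1)

    walk(root.get("name", "root"), root)


if __name__ == "__main__":
    summarize(TIMERS_PATH)
```

Run against this dump, the gauge loop would report the final self-play ELO and episode-length means, and the tree walk would show, for example, that `env_step` and `trainer_advance` account for most of the roughly 12,600 seconds under `TrainerController.advance`.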