{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 1.4651871919631958, "min": 1.36436128616333, "max": 3.2957258224487305, "count": 3938 }, "SoccerTwos.Policy.Entropy.sum": { "value": 29069.314453125, "min": 20437.71875, "max": 121509.5546875, "count": 3938 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 71.58208955223881, "min": 38.07874015748032, "max": 999.0, "count": 3938 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 19184.0, "min": 9488.0, "max": 28496.0, "count": 3938 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1613.3759688165437, "min": 1196.4580458095588, "max": 1699.91183871373, "count": 3933 }, "SoccerTwos.Self-play.ELO.sum": { "value": 216192.37982141686, "min": 2396.6060153360872, "max": 404351.4210272877, "count": 3933 }, "SoccerTwos.Step.mean": { "value": 39379979.0, "min": 9264.0, "max": 39379979.0, "count": 3938 }, "SoccerTwos.Step.sum": { "value": 39379979.0, "min": 9264.0, "max": 39379979.0, "count": 3938 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": -0.028221091255545616, "min": -0.1364418864250183, "max": 0.16764305531978607, "count": 3938 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": -3.809847354888916, "min": -26.0264949798584, "max": 28.241371154785156, "count": 3938 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": -0.02871767431497574, "min": -0.1389887034893036, "max": 0.171001598238945, "count": 3938 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": -3.8768861293792725, "min": -25.438573837280273, "max": 27.771480560302734, "count": 3938 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 3938 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 3938 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": -0.14708444012535943, "min": -0.47845999896526337, "max": 0.5422538495980777, "count": 3938 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": -19.856399416923523, "min": -70.02959990501404, "max": 65.90759986639023, "count": 3938 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": -0.14708444012535943, "min": -0.47845999896526337, "max": 0.5422538495980777, "count": 3938 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": -19.856399416923523, "min": -70.02959990501404, "max": 65.90759986639023, "count": 3938 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 3938 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 3938 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.01512234068941325, "min": 0.009372958253758648, "max": 0.027328644739463927, "count": 1909 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.01512234068941325, "min": 0.009372958253758648, "max": 0.027328644739463927, "count": 1909 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.0905092902481556, "min": 0.0004692011774750426, "max": 0.13288593292236328, "count": 1909 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.0905092902481556, "min": 0.0004692011774750426, "max": 0.13288593292236328, "count": 1909 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.09135221044222513, "min": 0.00047009816529074063, "max": 0.13584585537513097, "count": 1909 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.09135221044222513, "min": 0.00047009816529074063, "max": 0.13584585537513097, "count": 1909 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 
1909 }, "SoccerTwos.Policy.LearningRate.sum": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 1909 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000004, "max": 0.20000000000000007, "count": 1909 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000004, "max": 0.20000000000000007, "count": 1909 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 1909 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 1909 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1702135217", "python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]", "command_line_arguments": "/home/hsu/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.2.0.dev20231209", "numpy_version": "1.23.5", "end_time_seconds": "1702203564" }, "total": 68346.36700036802, "count": 1, "self": 0.1686309259966947, "children": { "run_training.setup": { "total": 0.01080092298798263, "count": 1, "self": 0.01080092298798263 }, "TrainerController.start_learning": { "total": 68346.18756851903, "count": 1, "self": 36.38380844175117, "children": { "TrainerController._reset_env": { "total": 3.2244778322055936, "count": 197, "self": 3.2244778322055936 }, "TrainerController.advance": { "total": 68306.43024038902, "count": 2729180, "self": 37.24232844292419, "children": { "env_step": { "total": 25798.706939560943, "count": 2729180, "self": 20562.114796636277, "children": { "SubprocessEnvManager._take_step": { "total": 5212.468917373393, "count": 2729180, "self": 202.1068630972295, "children": { "TorchPolicy.evaluate": { "total": 5010.362054276164, "count": 4946422, "self": 5010.362054276164 } } }, "workers": { "total": 24.123225551273208, "count": 2729180, "self": 0.0, "children": { "worker_root": { "total": 68274.32351207826, "count": 2729180, "is_parallel": true, "self": 51735.21530482557, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0013688890030607581, "count": 2, "is_parallel": true, "self": 0.00031600194051861763, "children": { "_process_rank_one_or_two_observation": { "total": 0.0010528870625421405, "count": 8, "is_parallel": true, "self": 0.0010528870625421405 } } }, "UnityEnvironment.step": { "total": 0.015421924006659538, "count": 1, "is_parallel": true, "self": 0.0003119389875791967, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00024947902420535684, "count": 1, "is_parallel": true, "self": 0.00024947902420535684 }, "communicator.exchange": { "total": 0.013923735998105258, "count": 1, "is_parallel": true, "self": 0.013923735998105258 }, "steps_from_proto": { "total": 0.0009367699967697263, "count": 2, "is_parallel": true, "self": 0.00019477703608572483, "children": { "_process_rank_one_or_two_observation": { "total": 0.0007419929606840014, "count": 8, "is_parallel": true, "self": 0.0007419929606840014 } } } } } } }, "UnityEnvironment.step": { "total": 16538.906428998045, "count": 2729179, "is_parallel": true, "self": 893.2779358867556, "children": { 
"UnityEnvironment._generate_step_input": { "total": 619.0697817663895, "count": 2729179, "is_parallel": true, "self": 619.0697817663895 }, "communicator.exchange": { "total": 12409.576098904654, "count": 2729179, "is_parallel": true, "self": 12409.576098904654 }, "steps_from_proto": { "total": 2616.982612440246, "count": 5458358, "is_parallel": true, "self": 518.2229520890396, "children": { "_process_rank_one_or_two_observation": { "total": 2098.7596603512065, "count": 21833432, "is_parallel": true, "self": 2098.7596603512065 } } } } }, "steps_from_proto": { "total": 0.20177825464634225, "count": 392, "is_parallel": true, "self": 0.040090341470204294, "children": { "_process_rank_one_or_two_observation": { "total": 0.16168791317613795, "count": 1568, "is_parallel": true, "self": 0.16168791317613795 } } } } } } } } }, "trainer_advance": { "total": 42470.48097238515, "count": 2729180, "self": 286.7875606256421, "children": { "process_trajectory": { "total": 5525.13896649977, "count": 2729180, "self": 5515.070226623036, "children": { "RLTrainer._checkpoint": { "total": 10.068739876733162, "count": 78, "self": 10.068739876733162 } } }, "_update_policy": { "total": 36658.55444525974, "count": 1910, "self": 2781.0812082847697, "children": { "TorchPOCAOptimizer.update": { "total": 33877.47323697497, "count": 57305, "self": 33877.47323697497 } } } } } } }, "trainer_threads": { "total": 8.610077202320099e-07, "count": 1, "self": 8.610077202320099e-07 }, "TrainerController._save_models": { "total": 0.14904099504929036, "count": 1, "self": 0.0014489630120806396, "children": { "RLTrainer._checkpoint": { "total": 0.14759203203720972, "count": 1, "self": 0.14759203203720972 } } } } } } }