{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1563942432403564,
"min": 3.149630308151245,
"max": 3.2956900596618652,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 66663.046875,
"min": 25940.142578125,
"max": 111828.1015625,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 300.6470588235294,
"min": 261.3333333333333,
"max": 999.0,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20444.0,
"min": 14672.0,
"max": 28340.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1219.5692826964037,
"min": 1198.0168266386224,
"max": 1219.5692826964037,
"count": 43
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 36587.07848089211,
"min": 2396.033653277245,
"max": 41411.70517627378,
"count": 43
},
"SoccerTwos.Step.mean": {
"value": 499232.0,
"min": 9950.0,
"max": 499232.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 499232.0,
"min": 9950.0,
"max": 499232.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.03765362128615379,
"min": -0.009388549253344536,
"max": 0.04418336600065231,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.2425694465637207,
"min": -0.12511155009269714,
"max": 1.458051085472107,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.03904584422707558,
"min": -0.009185528382658958,
"max": 0.05486869812011719,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.2885128259658813,
"min": -0.12370288372039795,
"max": 1.8106670379638672,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.48296364148457843,
"min": -0.5384615384615384,
"max": 0.48296364148457843,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 15.937800168991089,
"min": -12.301000118255615,
"max": 15.937800168991089,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.48296364148457843,
"min": -0.5384615384615384,
"max": 0.48296364148457843,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 15.937800168991089,
"min": -12.301000118255615,
"max": 15.937800168991089,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019610659056343138,
"min": 0.014299746853066609,
"max": 0.020620077780040446,
"count": 23
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019610659056343138,
"min": 0.014299746853066609,
"max": 0.020620077780040446,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.017787810197720924,
"min": 0.0004044039160362445,
"max": 0.017787810197720924,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.017787810197720924,
"min": 0.0004044039160362445,
"max": 0.017787810197720924,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.018563354884584746,
"min": 0.0004041414142799719,
"max": 0.018563354884584746,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.018563354884584746,
"min": 0.0004041414142799719,
"max": 0.018563354884584746,
"count": 23
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 23
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 23
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701589940",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701591452"
},
"total": 1512.5341511859988,
"count": 1,
"self": 0.37710676999631687,
"children": {
"run_training.setup": {
"total": 0.05790981900281622,
"count": 1,
"self": 0.05790981900281622
},
"TrainerController.start_learning": {
"total": 1512.0991345969996,
"count": 1,
"self": 1.11143703858761,
"children": {
"TrainerController._reset_env": {
"total": 2.554106884996145,
"count": 3,
"self": 2.554106884996145
},
"TrainerController.advance": {
"total": 1508.223750191417,
"count": 32636,
"self": 1.2235327585294726,
"children": {
"env_step": {
"total": 897.9893766806708,
"count": 32636,
"self": 715.3867988462698,
"children": {
"SubprocessEnvManager._take_step": {
"total": 181.87036738228198,
"count": 32636,
"self": 6.958218602518173,
"children": {
"TorchPolicy.evaluate": {
"total": 174.9121487797638,
"count": 64724,
"self": 174.9121487797638
}
}
},
"workers": {
"total": 0.732210452119034,
"count": 32636,
"self": 0.0,
"children": {
"worker_root": {
"total": 1509.174110517837,
"count": 32636,
"is_parallel": true,
"self": 939.3240124901131,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003827897999144625,
"count": 2,
"is_parallel": true,
"self": 0.001106725998397451,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002721172000747174,
"count": 8,
"is_parallel": true,
"self": 0.002721172000747174
}
}
},
"UnityEnvironment.step": {
"total": 0.03494443399904412,
"count": 1,
"is_parallel": true,
"self": 0.0013591089918918442,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006393720032065175,
"count": 1,
"is_parallel": true,
"self": 0.0006393720032065175
},
"communicator.exchange": {
"total": 0.02888951200293377,
"count": 1,
"is_parallel": true,
"self": 0.02888951200293377
},
"steps_from_proto": {
"total": 0.00405644100101199,
"count": 2,
"is_parallel": true,
"self": 0.0007671239982300904,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0032893170027818996,
"count": 8,
"is_parallel": true,
"self": 0.0032893170027818996
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 569.845528380727,
"count": 32635,
"is_parallel": true,
"self": 36.250143324890814,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.222136394961126,
"count": 32635,
"is_parallel": true,
"self": 24.222136394961126
},
"communicator.exchange": {
"total": 405.8313904701572,
"count": 32635,
"is_parallel": true,
"self": 405.8313904701572
},
"steps_from_proto": {
"total": 103.5418581907179,
"count": 65270,
"is_parallel": true,
"self": 18.407087022293126,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.13477116842478,
"count": 261080,
"is_parallel": true,
"self": 85.13477116842478
}
}
}
}
},
"steps_from_proto": {
"total": 0.004569646996969823,
"count": 4,
"is_parallel": true,
"self": 0.0008489149986417033,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0037207319983281195,
"count": 16,
"is_parallel": true,
"self": 0.0037207319983281195
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 609.0108407522166,
"count": 32636,
"self": 9.133657020021928,
"children": {
"process_trajectory": {
"total": 99.48270784419219,
"count": 32636,
"self": 99.22933577319054,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2533720710016496,
"count": 1,
"self": 0.2533720710016496
}
}
},
"_update_policy": {
"total": 500.3944758880025,
"count": 23,
"self": 84.26536148403466,
"children": {
"TorchPOCAOptimizer.update": {
"total": 416.12911440396783,
"count": 693,
"self": 416.12911440396783
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.690011211205274e-07,
"count": 1,
"self": 9.690011211205274e-07
},
"TrainerController._save_models": {
"total": 0.20983951299785986,
"count": 1,
"self": 0.0025311909994343296,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20730832199842553,
"count": 1,
"self": 0.20730832199842553
}
}
}
}
}
}
}