{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5931496620178223,
"min": 1.5379153490066528,
"max": 1.950473666191101,
"count": 889
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 31404.166015625,
"min": 12367.951171875,
"max": 44546.3046875,
"count": 889
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 64.57894736842105,
"min": 43.3,
"max": 113.0,
"count": 889
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19632.0,
"min": 5196.0,
"max": 21736.0,
"count": 889
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1686.7929876739474,
"min": 1628.9054425126162,
"max": 1719.1533080607512,
"count": 889
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 256392.53412644,
"min": 99161.43184951188,
"max": 378840.6926137847,
"count": 889
},
"SoccerTwos.Step.mean": {
"value": 19999780.0,
"min": 11119959.0,
"max": 19999780.0,
"count": 889
},
"SoccerTwos.Step.sum": {
"value": 19999780.0,
"min": 11119959.0,
"max": 19999780.0,
"count": 889
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.02013554982841015,
"min": -0.13025151193141937,
"max": 0.09185903519392014,
"count": 889
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.060603618621826,
"min": -21.250598907470703,
"max": 17.545076370239258,
"count": 889
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.02120521478354931,
"min": -0.1314365267753601,
"max": 0.08880048990249634,
"count": 889
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -3.2231926918029785,
"min": -20.439422607421875,
"max": 16.960893630981445,
"count": 889
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 889
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 889
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.13405921114118477,
"min": -0.5107376808705537,
"max": 0.3571549976865451,
"count": 889
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 20.377000093460083,
"min": -70.48179996013641,
"max": 58.28660011291504,
"count": 889
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.13405921114118477,
"min": -0.5107376808705537,
"max": 0.3571549976865451,
"count": 889
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 20.377000093460083,
"min": -70.48179996013641,
"max": 58.28660011291504,
"count": 889
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 889
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 889
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.023435101409753165,
"min": 0.011006926382348563,
"max": 0.023990907132004698,
"count": 431
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.023435101409753165,
"min": 0.011006926382348563,
"max": 0.023990907132004698,
"count": 431
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09929284676909447,
"min": 0.07108619386951129,
"max": 0.12211424360672633,
"count": 431
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09929284676909447,
"min": 0.07108619386951129,
"max": 0.12211424360672633,
"count": 431
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10040274386604627,
"min": 0.07166211406389872,
"max": 0.12442898328105609,
"count": 431
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10040274386604627,
"min": 0.07166211406389872,
"max": 0.12442898328105609,
"count": 431
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 431
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 431
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 431
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 431
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 431
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 431
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686380226",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:38:11) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/huangxin/miniconda/envs/rl/bin/mlagents-learn ./config/poca/MySoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos --run-id=MySoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1686411784"
},
"total": 31558.743268917002,
"count": 1,
"self": 0.18436241700328537,
"children": {
"run_training.setup": {
"total": 0.011634582999999976,
"count": 1,
"self": 0.011634582999999976
},
"TrainerController.start_learning": {
"total": 31558.547271917,
"count": 1,
"self": 6.859618200909608,
"children": {
"TrainerController._reset_env": {
"total": 2.877357038997752,
"count": 46,
"self": 2.877357038997752
},
"TrainerController.advance": {
"total": 31548.729604219094,
"count": 612292,
"self": 6.664372276431095,
"children": {
"env_step": {
"total": 24855.031640116315,
"count": 612292,
"self": 23881.4058230394,
"children": {
"SubprocessEnvManager._take_step": {
"total": 969.0804236776685,
"count": 612292,
"self": 28.495145847641993,
"children": {
"TorchPolicy.evaluate": {
"total": 940.5852778300265,
"count": 1115570,
"self": 940.5852778300265
}
}
},
"workers": {
"total": 4.545393399244284,
"count": 612292,
"self": 0.0,
"children": {
"worker_root": {
"total": 31546.64145962295,
"count": 612292,
"is_parallel": true,
"self": 8570.061137250581,
"children": {
"steps_from_proto": {
"total": 0.05852121300729651,
"count": 92,
"is_parallel": true,
"self": 0.008406461991320668,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.050114751015975845,
"count": 368,
"is_parallel": true,
"self": 0.050114751015975845
}
}
},
"UnityEnvironment.step": {
"total": 22976.521801159364,
"count": 612292,
"is_parallel": true,
"self": 62.71345968130845,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 368.9046247036486,
"count": 612292,
"is_parallel": true,
"self": 368.9046247036486
},
"communicator.exchange": {
"total": 21825.500489326132,
"count": 612292,
"is_parallel": true,
"self": 21825.500489326132
},
"steps_from_proto": {
"total": 719.4032274482732,
"count": 1224584,
"is_parallel": true,
"self": 99.59622331908429,
"children": {
"_process_rank_one_or_two_observation": {
"total": 619.807004129189,
"count": 4898336,
"is_parallel": true,
"self": 619.807004129189
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 6687.033591826349,
"count": 612292,
"self": 48.71599595516,
"children": {
"process_trajectory": {
"total": 1710.8917903592137,
"count": 612292,
"self": 1709.4232704842111,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4685198750025847,
"count": 18,
"self": 1.4685198750025847
}
}
},
"_update_policy": {
"total": 4927.425805511975,
"count": 431,
"self": 756.8673415870398,
"children": {
"TorchPOCAOptimizer.update": {
"total": 4170.5584639249355,
"count": 12930,
"self": 4170.5584639249355
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.749992174562067e-07,
"count": 1,
"self": 3.749992174562067e-07
},
"TrainerController._save_models": {
"total": 0.08069208300003083,
"count": 1,
"self": 0.0018718749997788109,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07882020800025202,
"count": 1,
"self": 0.07882020800025202
}
}
}
}
}
}
}