poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8723636865615845,
"min": 1.7541974782943726,
"max": 3.2958028316497803,
"count": 593
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 37986.515625,
"min": 29945.1171875,
"max": 112419.703125,
"count": 593
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 49.857142857142854,
"min": 40.78151260504202,
"max": 999.0,
"count": 593
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19544.0,
"min": 16164.0,
"max": 24272.0,
"count": 593
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1602.0229134076994,
"min": 1194.1664684268064,
"max": 1632.8628018606792,
"count": 590
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 313996.4910279091,
"min": 2390.6186252188522,
"max": 385269.0296528018,
"count": 590
},
"SoccerTwos.Step.mean": {
"value": 5929882.0,
"min": 9152.0,
"max": 5929882.0,
"count": 593
},
"SoccerTwos.Step.sum": {
"value": 5929882.0,
"min": 9152.0,
"max": 5929882.0,
"count": 593
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.011386324651539326,
"min": -0.07559556514024734,
"max": 0.30134502053260803,
"count": 593
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.243105888366699,
"min": -14.816730499267578,
"max": 38.11559295654297,
"count": 593
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0042584724724292755,
"min": -0.08128456026315689,
"max": 0.30652838945388794,
"count": 593
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.8389191031455994,
"min": -15.931774139404297,
"max": 38.12245559692383,
"count": 593
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 593
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 593
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.09663959535850486,
"min": -0.6842105263157895,
"max": 0.6156806474731814,
"count": 593
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -19.038000285625458,
"min": -54.18879997730255,
"max": 79.10879975557327,
"count": 593
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.09663959535850486,
"min": -0.6842105263157895,
"max": 0.6156806474731814,
"count": 593
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -19.038000285625458,
"min": -54.18879997730255,
"max": 79.10879975557327,
"count": 593
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 593
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 593
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016978301322766735,
"min": 0.011128998205337363,
"max": 0.027278373801770308,
"count": 286
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016978301322766735,
"min": 0.011128998205337363,
"max": 0.027278373801770308,
"count": 286
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.1125520331164201,
"min": 0.0010447911180866262,
"max": 0.12484250217676163,
"count": 286
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.1125520331164201,
"min": 0.0010447911180866262,
"max": 0.12484250217676163,
"count": 286
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11449736927946409,
"min": 0.001045413986624529,
"max": 0.1277681755522887,
"count": 286
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11449736927946409,
"min": 0.001045413986624529,
"max": 0.1277681755522887,
"count": 286
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.000291106187964605,
"min": 0.000291106187964605,
"max": 0.0002999604150131951,
"count": 286
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.000291106187964605,
"min": 0.000291106187964605,
"max": 0.0002999604150131951,
"count": 286
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.19703539500000006,
"min": 0.19703539500000006,
"max": 0.19998680500000002,
"count": 286
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.19703539500000006,
"min": 0.19703539500000006,
"max": 0.19998680500000002,
"count": 286
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.004852066210499999,
"min": 0.004852066210499999,
"max": 0.004999341569499999,
"count": 286
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.004852066210499999,
"min": 0.004852066210499999,
"max": 0.004999341569499999,
"count": 286
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677938339",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:39:03) \n[GCC 11.3.0]",
"command_line_arguments": "/opt/conda/envs/rl/bin/mlagents-learn ./ml-agents/config/poca/SoccerTwos.yaml --env ./ml-agents/training-envs-executables/Soccer/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu111",
"numpy_version": "1.21.2",
"end_time_seconds": "1678014961"
},
"total": 76621.71481010201,
"count": 1,
"self": 3.4311009410012048,
"children": {
"run_training.setup": {
"total": 0.024470964999636635,
"count": 1,
"self": 0.024470964999636635
},
"TrainerController.start_learning": {
"total": 76618.25923819601,
"count": 1,
"self": 10.956967044534395,
"children": {
"TrainerController._reset_env": {
"total": 9.670278003031854,
"count": 30,
"self": 9.670278003031854
},
"TrainerController.advance": {
"total": 76597.35695718948,
"count": 408131,
"self": 10.450964555231621,
"children": {
"env_step": {
"total": 72966.26667078599,
"count": 408131,
"self": 70579.69719207654,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2379.6736674868444,
"count": 408131,
"self": 55.50945471189334,
"children": {
"TorchPolicy.evaluate": {
"total": 2324.164212774951,
"count": 748077,
"self": 2324.164212774951
}
}
},
"workers": {
"total": 6.895811222610064,
"count": 408130,
"self": 0.0,
"children": {
"worker_root": {
"total": 76599.9000101015,
"count": 408130,
"is_parallel": true,
"self": 7397.858097038319,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005997055995976552,
"count": 2,
"is_parallel": true,
"self": 0.0008024279959499836,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005194628000026569,
"count": 8,
"is_parallel": true,
"self": 0.005194628000026569
}
}
},
"UnityEnvironment.step": {
"total": 0.21028139200643636,
"count": 1,
"is_parallel": true,
"self": 0.0002831190067809075,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.006894007994560525,
"count": 1,
"is_parallel": true,
"self": 0.006894007994560525
},
"communicator.exchange": {
"total": 0.19763975898968056,
"count": 1,
"is_parallel": true,
"self": 0.19763975898968056
},
"steps_from_proto": {
"total": 0.005464506015414372,
"count": 2,
"is_parallel": true,
"self": 0.0005729170516133308,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004891588963801041,
"count": 8,
"is_parallel": true,
"self": 0.004891588963801041
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 69201.88179623117,
"count": 408129,
"is_parallel": true,
"self": 111.24734427715885,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1150.8276157755463,
"count": 408129,
"is_parallel": true,
"self": 1150.8276157755463
},
"communicator.exchange": {
"total": 65827.53694993193,
"count": 408129,
"is_parallel": true,
"self": 65827.53694993193
},
"steps_from_proto": {
"total": 2112.269886246533,
"count": 816258,
"is_parallel": true,
"self": 223.77512643302907,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1888.494759813504,
"count": 3265032,
"is_parallel": true,
"self": 1888.494759813504
}
}
}
}
},
"steps_from_proto": {
"total": 0.16011683200486004,
"count": 58,
"is_parallel": true,
"self": 0.016931115242186934,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.1431857167626731,
"count": 232,
"is_parallel": true,
"self": 0.1431857167626731
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3620.6393218482553,
"count": 408130,
"self": 72.60683037881972,
"children": {
"process_trajectory": {
"total": 1473.3965893133136,
"count": 408130,
"self": 1465.544722266408,
"children": {
"RLTrainer._checkpoint": {
"total": 7.8518670469056815,
"count": 11,
"self": 7.8518670469056815
}
}
},
"_update_policy": {
"total": 2074.635902156122,
"count": 286,
"self": 1596.4877418332326,
"children": {
"TorchPOCAOptimizer.update": {
"total": 478.14816032288945,
"count": 8586,
"self": 478.14816032288945
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2280070222914219e-06,
"count": 1,
"self": 1.2280070222914219e-06
},
"TrainerController._save_models": {
"total": 0.2750347309629433,
"count": 1,
"self": 0.002131151966750622,
"children": {
"RLTrainer._checkpoint": {
"total": 0.27290357899619266,
"count": 1,
"self": 0.27290357899619266
}
}
}
}
}
}
}
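
The JSON above is the raw gauge and timer dump that ML-Agents writes at the end of a run. As a minimal sketch for inspecting it (the script and the local path run_logs/timers.json are illustrative assumptions, not part of this repository), the gauges and the nested timer tree can be read with Python's standard json module:

import json

# Minimal sketch: load the timers.json shown above and summarize it.
# The path "run_logs/timers.json" is an assumed local location.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the latest value plus its min/max and sample count.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# The timer tree is nested under "children"; pull out the total wall-clock
# time and the share of it spent stepping the Unity environment.
total = timers["total"]
env_step = (
    timers["children"]["TrainerController.start_learning"]["children"]
    ["TrainerController.advance"]["children"]["env_step"]["total"]
)
print(f"Total training time: {total / 3600:.1f} h, "
      f"env_step share: {env_step / total:.0%}")

For this particular file that prints roughly 21.3 hours total, with about 95% of the time spent in env_step, i.e. the run was dominated by Unity environment stepping rather than policy updates.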