{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.9570204019546509,
"min": 1.9343773126602173,
"max": 3.295746088027954,
"count": 345
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 36823.296875,
"min": 8203.974609375,
"max": 117514.0546875,
"count": 345
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 71.21739130434783,
"min": 42.08620689655172,
"max": 999.0,
"count": 345
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19656.0,
"min": 3996.0,
"max": 31452.0,
"count": 345
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1553.8124313206704,
"min": 1195.0758392557295,
"max": 1576.8404164349931,
"count": 340
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 214426.1155222525,
"min": 2390.151678511459,
"max": 361013.56374995445,
"count": 340
},
"SoccerTwos.Step.mean": {
"value": 3449919.0,
"min": 9744.0,
"max": 3449919.0,
"count": 345
},
"SoccerTwos.Step.sum": {
"value": 3449919.0,
"min": 9744.0,
"max": 3449919.0,
"count": 345
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.04013489559292793,
"min": -0.07595100998878479,
"max": 0.19094324111938477,
"count": 345
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -5.538615703582764,
"min": -13.15078353881836,
"max": 27.43212127685547,
"count": 345
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.045506782829761505,
"min": -0.08057396858930588,
"max": 0.1898651123046875,
"count": 345
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -6.279935836791992,
"min": -14.825610160827637,
"max": 27.314037322998047,
"count": 345
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 345
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 345
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.11474347892014877,
"min": -0.5080499991774559,
"max": 0.4875208344310522,
"count": 345
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -15.83460009098053,
"min": -53.75280010700226,
"max": 58.19000029563904,
"count": 345
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.11474347892014877,
"min": -0.5080499991774559,
"max": 0.4875208344310522,
"count": 345
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -15.83460009098053,
"min": -53.75280010700226,
"max": 58.19000029563904,
"count": 345
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 345
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 345
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014121736674011724,
"min": 0.011251415135726953,
"max": 0.024424244975671174,
"count": 166
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014121736674011724,
"min": 0.011251415135726953,
"max": 0.024424244975671174,
"count": 166
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10300621017813683,
"min": 0.0008304478940165912,
"max": 0.12439776758352915,
"count": 166
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10300621017813683,
"min": 0.0008304478940165912,
"max": 0.12439776758352915,
"count": 166
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10415177519122759,
"min": 0.000824070256203413,
"max": 0.12655389656623203,
"count": 166
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10415177519122759,
"min": 0.000824070256203413,
"max": 0.12655389656623203,
"count": 166
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 166
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 166
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 166
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 166
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 166
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 166
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716214895",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Programming\\Anaconda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1716229238"
},
"total": 14343.405976699985,
"count": 1,
"self": 0.33416739996755496,
"children": {
"run_training.setup": {
"total": 0.14068520002183504,
"count": 1,
"self": 0.14068520002183504
},
"TrainerController.start_learning": {
"total": 14342.931124099996,
"count": 1,
"self": 6.827881407021778,
"children": {
"TrainerController._reset_env": {
"total": 8.31276689999504,
"count": 18,
"self": 8.31276689999504
},
"TrainerController.advance": {
"total": 14327.57910279298,
"count": 235036,
"self": 6.421814991655992,
"children": {
"env_step": {
"total": 5042.428487199213,
"count": 235036,
"self": 3943.1594364810153,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1094.9635137108562,
"count": 235036,
"self": 44.598583820828935,
"children": {
"TorchPolicy.evaluate": {
"total": 1050.3649298900273,
"count": 434380,
"self": 1050.3649298900273
}
}
},
"workers": {
"total": 4.305537007341627,
"count": 235035,
"self": 0.0,
"children": {
"worker_root": {
"total": 14328.377224602824,
"count": 235035,
"is_parallel": true,
"self": 11227.627575801482,
"children": {
"steps_from_proto": {
"total": 0.037452599965035915,
"count": 36,
"is_parallel": true,
"self": 0.007069700048305094,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03038289991673082,
"count": 144,
"is_parallel": true,
"self": 0.03038289991673082
}
}
},
"UnityEnvironment.step": {
"total": 3100.712196201377,
"count": 235035,
"is_parallel": true,
"self": 183.86348560912302,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 219.3428394010698,
"count": 235035,
"is_parallel": true,
"self": 219.3428394010698
},
"communicator.exchange": {
"total": 2192.8369953949295,
"count": 235035,
"is_parallel": true,
"self": 2192.8369953949295
},
"steps_from_proto": {
"total": 504.6688757962547,
"count": 470070,
"is_parallel": true,
"self": 96.13699782348704,
"children": {
"_process_rank_one_or_two_observation": {
"total": 408.5318779727677,
"count": 1880280,
"is_parallel": true,
"self": 408.5318779727677
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 9278.728800602112,
"count": 235035,
"self": 51.284025008324534,
"children": {
"process_trajectory": {
"total": 3920.6790171939065,
"count": 235035,
"self": 3919.8113520939078,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8676650999987032,
"count": 6,
"self": 0.8676650999987032
}
}
},
"_update_policy": {
"total": 5306.765758399881,
"count": 166,
"self": 683.6224011009035,
"children": {
"TorchPOCAOptimizer.update": {
"total": 4623.143357298977,
"count": 4989,
"self": 4623.143357298977
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.4999899324029684e-06,
"count": 1,
"self": 2.4999899324029684e-06
},
"TrainerController._save_models": {
"total": 0.2113705000083428,
"count": 1,
"self": 0.010710100003052503,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20066040000529028,
"count": 1,
"self": 0.20066040000529028
}
}
}
}
}
}
}