{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.593958854675293,
"min": 2.5524582862854004,
"max": 3.29573130607605,
"count": 361
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 54618.3984375,
"min": 18797.15234375,
"max": 137694.359375,
"count": 361
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 62.34177215189873,
"min": 55.37078651685393,
"max": 999.0,
"count": 361
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19700.0,
"min": 12544.0,
"max": 29160.0,
"count": 361
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1473.2696352286403,
"min": 1199.4207269131573,
"max": 1473.694628559185,
"count": 352
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 232776.60236612515,
"min": 2407.608518724843,
"max": 263042.3834828209,
"count": 352
},
"SoccerTwos.Step.mean": {
"value": 3609983.0,
"min": 9598.0,
"max": 3609983.0,
"count": 361
},
"SoccerTwos.Step.sum": {
"value": 3609983.0,
"min": 9598.0,
"max": 3609983.0,
"count": 361
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.006089435424655676,
"min": -0.030074981972575188,
"max": 0.21363811194896698,
"count": 361
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.9621307849884033,
"min": -3.7292978763580322,
"max": 33.75482177734375,
"count": 361
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.003326693782582879,
"min": -0.028610987588763237,
"max": 0.21243532001972198,
"count": 361
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.5256175994873047,
"min": -3.4706549644470215,
"max": 33.564781188964844,
"count": 361
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 361
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 361
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.07237215057204041,
"min": -0.6923076923076923,
"max": 0.6493212201378562,
"count": 361
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -11.434799790382385,
"min": -23.015399932861328,
"max": 54.73320019245148,
"count": 361
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.07237215057204041,
"min": -0.6923076923076923,
"max": 0.6493212201378562,
"count": 361
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -11.434799790382385,
"min": -23.015399932861328,
"max": 54.73320019245148,
"count": 361
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 361
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 361
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014454081596340984,
"min": 0.010496248743341615,
"max": 0.02352461224848715,
"count": 170
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014454081596340984,
"min": 0.010496248743341615,
"max": 0.02352461224848715,
"count": 170
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.07907497584819793,
"min": 4.0442953468300404e-05,
"max": 0.09179584830999374,
"count": 170
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.07907497584819793,
"min": 4.0442953468300404e-05,
"max": 0.09179584830999374,
"count": 170
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08101197282473246,
"min": 4.2009822209365666e-05,
"max": 0.09381573622425397,
"count": 170
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08101197282473246,
"min": 4.2009822209365666e-05,
"max": 0.09381573622425397,
"count": 170
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 170
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 170
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 170
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 170
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 170
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 170
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716367875",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\D:\\Anaconda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1716403958"
},
"total": 36082.7076177,
"count": 1,
"self": 4.43322530000296,
"children": {
"run_training.setup": {
"total": 0.30512629999975616,
"count": 1,
"self": 0.30512629999975616
},
"TrainerController.start_learning": {
"total": 36077.96926609999,
"count": 1,
"self": 24.811193900255603,
"children": {
"TrainerController._reset_env": {
"total": 18.133942500018748,
"count": 19,
"self": 18.133942500018748
},
"TrainerController.advance": {
"total": 36034.29294119972,
"count": 237082,
"self": 27.125916098317248,
"children": {
"env_step": {
"total": 20675.084004400087,
"count": 237082,
"self": 15789.020738299663,
"children": {
"SubprocessEnvManager._take_step": {
"total": 4870.119125000449,
"count": 237082,
"self": 158.04536249853845,
"children": {
"TorchPolicy.evaluate": {
"total": 4712.07376250191,
"count": 463355,
"self": 4712.07376250191
}
}
},
"workers": {
"total": 15.944141099974331,
"count": 237081,
"self": 0.0,
"children": {
"worker_root": {
"total": 36034.164161400295,
"count": 237081,
"is_parallel": true,
"self": 23602.54876419942,
"children": {
"steps_from_proto": {
"total": 0.11984779999784223,
"count": 38,
"is_parallel": true,
"self": 0.024168299980374286,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.09567950001746794,
"count": 152,
"is_parallel": true,
"self": 0.09567950001746794
}
}
},
"UnityEnvironment.step": {
"total": 12431.49554940088,
"count": 237081,
"is_parallel": true,
"self": 763.4748210015678,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 631.4447204981034,
"count": 237081,
"is_parallel": true,
"self": 631.4447204981034
},
"communicator.exchange": {
"total": 8557.835004600078,
"count": 237081,
"is_parallel": true,
"self": 8557.835004600078
},
"steps_from_proto": {
"total": 2478.7410033011283,
"count": 474162,
"is_parallel": true,
"self": 463.8925242019659,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2014.8484790991624,
"count": 1896648,
"is_parallel": true,
"self": 2014.8484790991624
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 15332.083020701317,
"count": 237081,
"self": 145.17765050088565,
"children": {
"process_trajectory": {
"total": 2881.143248600457,
"count": 237081,
"self": 2877.931790000458,
"children": {
"RLTrainer._checkpoint": {
"total": 3.2114585999988776,
"count": 7,
"self": 3.2114585999988776
}
}
},
"_update_policy": {
"total": 12305.762121599973,
"count": 170,
"self": 1432.1141110001463,
"children": {
"TorchPOCAOptimizer.update": {
"total": 10873.648010599827,
"count": 5103,
"self": 10873.648010599827
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.499997885432094e-06,
"count": 1,
"self": 4.499997885432094e-06
},
"TrainerController._save_models": {
"total": 0.7311840000038501,
"count": 1,
"self": 0.06569180000224151,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6654922000016086,
"count": 1,
"self": 0.6654922000016086
}
}
}
}
}
}
}