{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8498406410217285,
"min": 1.847397804260254,
"max": 3.2957520484924316,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 40370.921875,
"min": 22338.81640625,
"max": 115218.171875,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 48.15384615384615,
"min": 39.9344262295082,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20032.0,
"min": 12712.0,
"max": 28172.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1569.3760824336978,
"min": 1198.9287916317044,
"max": 1575.3067719073695,
"count": 491
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 326430.2251462091,
"min": 2403.352124755773,
"max": 379166.89638808754,
"count": 491
},
"SoccerTwos.Step.mean": {
"value": 4999928.0,
"min": 9104.0,
"max": 4999928.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999928.0,
"min": 9104.0,
"max": 4999928.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.00030533509561792016,
"min": -0.1340479701757431,
"max": 0.16007526218891144,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.06350970268249512,
"min": -30.16079330444336,
"max": 27.242170333862305,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0011261050822213292,
"min": -0.12452097237110138,
"max": 0.16851264238357544,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.23422986268997192,
"min": -28.01721954345703,
"max": 26.100231170654297,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.156488462136342,
"min": -0.46153846153846156,
"max": 0.46030666629473366,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -32.54960012435913,
"min": -64.9243997335434,
"max": 56.925599694252014,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.156488462136342,
"min": -0.46153846153846156,
"max": 0.46030666629473366,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -32.54960012435913,
"min": -64.9243997335434,
"max": 56.925599694252014,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015213420743384632,
"min": 0.009890492176054977,
"max": 0.025273329729679973,
"count": 240
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015213420743384632,
"min": 0.009890492176054977,
"max": 0.025273329729679973,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11249540771047274,
"min": 7.357478219394882e-05,
"max": 0.12609227895736694,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11249540771047274,
"min": 7.357478219394882e-05,
"max": 0.12609227895736694,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11473422423005104,
"min": 6.646392794209532e-05,
"max": 0.12873559867342313,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11473422423005104,
"min": 6.646392794209532e-05,
"max": 0.12873559867342313,
"count": 240
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1705545396",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\hazem\\anaconda3\\envs\\rl2\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./ml-agents/training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1705562907"
},
"total": 17510.391402700014,
"count": 1,
"self": 12.079760100023123,
"children": {
"run_training.setup": {
"total": 0.10511549998773262,
"count": 1,
"self": 0.10511549998773262
},
"TrainerController.start_learning": {
"total": 17498.206527100003,
"count": 1,
"self": 9.904199307580711,
"children": {
"TrainerController._reset_env": {
"total": 23.894243299931986,
"count": 25,
"self": 23.894243299931986
},
"TrainerController.advance": {
"total": 17460.972794092464,
"count": 342556,
"self": 10.442792986985296,
"children": {
"env_step": {
"total": 7150.955410609895,
"count": 342556,
"self": 5524.283039325499,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1620.3945509917103,
"count": 342556,
"self": 60.49473507993389,
"children": {
"TorchPolicy.evaluate": {
"total": 1559.8998159117764,
"count": 631144,
"self": 1559.8998159117764
}
}
},
"workers": {
"total": 6.277820292685647,
"count": 342556,
"self": 0.0,
"children": {
"worker_root": {
"total": 17456.62176100674,
"count": 342556,
"is_parallel": true,
"self": 13099.354612994939,
"children": {
"steps_from_proto": {
"total": 0.050975099962670356,
"count": 50,
"is_parallel": true,
"self": 0.010099100094521418,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04087599986814894,
"count": 200,
"is_parallel": true,
"self": 0.04087599986814894
}
}
},
"UnityEnvironment.step": {
"total": 4357.216172911838,
"count": 342556,
"is_parallel": true,
"self": 217.58818619675003,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 208.6388254086196,
"count": 342556,
"is_parallel": true,
"self": 208.6388254086196
},
"communicator.exchange": {
"total": 3198.897393096471,
"count": 342556,
"is_parallel": true,
"self": 3198.897393096471
},
"steps_from_proto": {
"total": 732.0917682099971,
"count": 685112,
"is_parallel": true,
"self": 149.0753828828747,
"children": {
"_process_rank_one_or_two_observation": {
"total": 583.0163853271224,
"count": 2740448,
"is_parallel": true,
"self": 583.0163853271224
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 10299.574590495584,
"count": 342556,
"self": 75.59635449948837,
"children": {
"process_trajectory": {
"total": 1843.6895423959068,
"count": 342556,
"self": 1833.567360295914,
"children": {
"RLTrainer._checkpoint": {
"total": 10.122182099992642,
"count": 10,
"self": 10.122182099992642
}
}
},
"_update_policy": {
"total": 8380.288693600189,
"count": 240,
"self": 827.925356701744,
"children": {
"TorchPOCAOptimizer.update": {
"total": 7552.363336898445,
"count": 7200,
"self": 7552.363336898445
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.900022082030773e-06,
"count": 1,
"self": 2.900022082030773e-06
},
"TrainerController._save_models": {
"total": 3.4352875000040513,
"count": 1,
"self": 2.068576199992094,
"children": {
"RLTrainer._checkpoint": {
"total": 1.366711300011957,
"count": 1,
"self": 1.366711300011957
}
}
}
}
}
}
}