{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.2800854444503784,
"min": 1.2717479467391968,
"max": 3.2957215309143066,
"count": 4367
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 27445.03125,
"min": 10272.7109375,
"max": 132977.9375,
"count": 4367
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 58.0,
"min": 40.049180327868854,
"max": 999.0,
"count": 4367
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20184.0,
"min": 7992.0,
"max": 29760.0,
"count": 4367
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1726.9090089483225,
"min": 1197.1001127243378,
"max": 1761.865470780837,
"count": 4364
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 300482.1675570081,
"min": 2394.696090291733,
"max": 398827.06342036114,
"count": 4364
},
"SoccerTwos.Step.mean": {
"value": 43669962.0,
"min": 9296.0,
"max": 43669962.0,
"count": 4367
},
"SoccerTwos.Step.sum": {
"value": 43669962.0,
"min": 9296.0,
"max": 43669962.0,
"count": 4367
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.039809733629226685,
"min": -0.134344220161438,
"max": 0.21200495958328247,
"count": 4367
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -6.887084007263184,
"min": -24.31344985961914,
"max": 33.40375518798828,
"count": 4367
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.03556210920214653,
"min": -0.13364821672439575,
"max": 0.21488294005393982,
"count": 4367
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -6.152245044708252,
"min": -23.749666213989258,
"max": 33.506019592285156,
"count": 4367
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 4367
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 4367
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.11409826464735703,
"min": -0.7389028566224235,
"max": 0.5161789451774798,
"count": 4367
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -19.738999783992767,
"min": -79.13860011100769,
"max": 74.17319989204407,
"count": 4367
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.11409826464735703,
"min": -0.7389028566224235,
"max": 0.5161789451774798,
"count": 4367
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -19.738999783992767,
"min": -79.13860011100769,
"max": 74.17319989204407,
"count": 4367
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4367
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4367
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017341240340222917,
"min": 0.01008181855819809,
"max": 0.026270056962190817,
"count": 2118
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017341240340222917,
"min": 0.01008181855819809,
"max": 0.026270056962190817,
"count": 2118
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11143563985824585,
"min": 0.0007035023275723992,
"max": 0.12950959677497545,
"count": 2118
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11143563985824585,
"min": 0.0007035023275723992,
"max": 0.12950959677497545,
"count": 2118
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11262097284197807,
"min": 0.0007161962993753453,
"max": 0.1324326738715172,
"count": 2118
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11262097284197807,
"min": 0.0007161962993753453,
"max": 0.1324326738715172,
"count": 2118
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2118
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2118
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2118
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2118
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2118
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2118
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1708089878",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\Admin\\anaconda3\\envs\\DRLhf\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0",
"numpy_version": "1.23.5",
"end_time_seconds": "1708157037"
},
"total": 67160.2643288,
"count": 1,
"self": 2.0084765999927185,
"children": {
"run_training.setup": {
"total": 0.10348479999811389,
"count": 1,
"self": 0.10348479999811389
},
"TrainerController.start_learning": {
"total": 67158.1523674,
"count": 1,
"self": 47.09535559063079,
"children": {
"TrainerController._reset_env": {
"total": 5.942646700015757,
"count": 219,
"self": 5.942646700015757
},
"TrainerController.advance": {
"total": 67104.91934140938,
"count": 3023040,
"self": 52.590495008043945,
"children": {
"env_step": {
"total": 49672.14214958492,
"count": 3023040,
"self": 28721.916673215717,
"children": {
"SubprocessEnvManager._take_step": {
"total": 20919.913189628744,
"count": 3023040,
"self": 319.52815042139264,
"children": {
"TorchPolicy.evaluate": {
"total": 20600.38503920735,
"count": 5483682,
"self": 20600.38503920735
}
}
},
"workers": {
"total": 30.312286740459967,
"count": 3023040,
"self": 0.0,
"children": {
"worker_root": {
"total": 67081.3145659925,
"count": 3023040,
"is_parallel": true,
"self": 43501.80031698538,
"children": {
"steps_from_proto": {
"total": 0.2578525001881644,
"count": 438,
"is_parallel": true,
"self": 0.051626899832626805,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.2062256003555376,
"count": 1752,
"is_parallel": true,
"self": 0.2062256003555376
}
}
},
"UnityEnvironment.step": {
"total": 23579.256396506942,
"count": 3023040,
"is_parallel": true,
"self": 1185.1771390245995,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 777.977017760626,
"count": 3023040,
"is_parallel": true,
"self": 777.977017760626
},
"communicator.exchange": {
"total": 17857.595922984474,
"count": 3023040,
"is_parallel": true,
"self": 17857.595922984474
},
"steps_from_proto": {
"total": 3758.5063167372427,
"count": 6046080,
"is_parallel": true,
"self": 742.5215650333557,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3015.984751703887,
"count": 24184320,
"is_parallel": true,
"self": 3015.984751703887
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 17380.18669681641,
"count": 3023040,
"self": 342.63538001524284,
"children": {
"process_trajectory": {
"total": 9550.808672102023,
"count": 3023040,
"self": 9537.166336302034,
"children": {
"RLTrainer._checkpoint": {
"total": 13.64233579998836,
"count": 87,
"self": 13.64233579998836
}
}
},
"_update_policy": {
"total": 7486.742644699145,
"count": 2118,
"self": 4049.6184280940506,
"children": {
"TorchPOCAOptimizer.update": {
"total": 3437.1242166050943,
"count": 63549,
"self": 3437.1242166050943
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.0999868866056204e-06,
"count": 1,
"self": 2.0999868866056204e-06
},
"TrainerController._save_models": {
"total": 0.195021599996835,
"count": 1,
"self": 0.02721689999452792,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16780470000230707,
"count": 1,
"self": 0.16780470000230707
}
}
}
}
}
}
}