{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.652201771736145,
"min": 1.6454737186431885,
"max": 3.295718193054199,
"count": 1064
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 31775.14453125,
"min": 11873.9892578125,
"max": 125012.0625,
"count": 1064
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 57.811764705882354,
"min": 42.19827586206897,
"max": 999.0,
"count": 1064
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19656.0,
"min": 9712.0,
"max": 28624.0,
"count": 1064
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1617.0756943274994,
"min": 1187.911753721827,
"max": 1647.4013844219048,
"count": 998
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 274902.8680356749,
"min": 2376.9959188058983,
"max": 376134.1197163431,
"count": 998
},
"SoccerTwos.Step.mean": {
"value": 10639998.0,
"min": 9540.0,
"max": 10639998.0,
"count": 1064
},
"SoccerTwos.Step.sum": {
"value": 10639998.0,
"min": 9540.0,
"max": 10639998.0,
"count": 1064
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.015043281018733978,
"min": -0.12756769359111786,
"max": 0.2135513424873352,
"count": 1064
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.5423145294189453,
"min": -22.38164520263672,
"max": 27.06729507446289,
"count": 1064
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.012198357842862606,
"min": -0.12327215075492859,
"max": 0.20781882107257843,
"count": 1064
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.0615224838256836,
"min": -23.006736755371094,
"max": 26.394203186035156,
"count": 1064
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1064
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1064
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.06232189638374825,
"min": -0.5494315812462255,
"max": 0.6391851843139271,
"count": 1064
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 10.532400488853455,
"min": -51.174399852752686,
"max": 63.113799929618835,
"count": 1064
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.06232189638374825,
"min": -0.5494315812462255,
"max": 0.6391851843139271,
"count": 1064
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 10.532400488853455,
"min": -51.174399852752686,
"max": 63.113799929618835,
"count": 1064
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1064
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1064
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017264949838863685,
"min": 0.010595528713505093,
"max": 0.02360185617969061,
"count": 510
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017264949838863685,
"min": 0.010595528713505093,
"max": 0.02360185617969061,
"count": 510
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10992388625939688,
"min": 1.070836359910269e-06,
"max": 0.12059773579239845,
"count": 510
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10992388625939688,
"min": 1.070836359910269e-06,
"max": 0.12059773579239845,
"count": 510
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11164524704217911,
"min": 1.0474380530922645e-06,
"max": 0.12335343509912491,
"count": 510
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11164524704217911,
"min": 1.0474380530922645e-06,
"max": 0.12335343509912491,
"count": 510
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 510
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 510
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 510
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 510
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 510
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 510
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677587703",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:28:38) [MSC v.1929 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\jonas\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cu117",
"numpy_version": "1.21.2",
"end_time_seconds": "1677615712"
},
"total": 28008.975405,
"count": 1,
"self": 0.06407110000145622,
"children": {
"run_training.setup": {
"total": 0.13176489999999985,
"count": 1,
"self": 0.13176489999999985
},
"TrainerController.start_learning": {
"total": 28008.779569,
"count": 1,
"self": 22.856993000114016,
"children": {
"TrainerController._reset_env": {
"total": 7.18871370000327,
"count": 54,
"self": 7.18871370000327
},
"TrainerController.advance": {
"total": 27978.46382059988,
"count": 721773,
"self": 21.633592700491135,
"children": {
"env_step": {
"total": 19927.397306399507,
"count": 721773,
"self": 12892.338716699054,
"children": {
"SubprocessEnvManager._take_step": {
"total": 7020.501626300867,
"count": 721773,
"self": 135.44392550302746,
"children": {
"TorchPolicy.evaluate": {
"total": 6885.05770079784,
"count": 1345596,
"self": 6885.05770079784
}
}
},
"workers": {
"total": 14.55696339958487,
"count": 721772,
"self": 0.0,
"children": {
"worker_root": {
"total": 27972.65793990101,
"count": 721772,
"is_parallel": true,
"self": 17872.508112801162,
"children": {
"steps_from_proto": {
"total": 0.1578121999962363,
"count": 108,
"is_parallel": true,
"self": 0.03201969997553755,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.12579250002069875,
"count": 432,
"is_parallel": true,
"self": 0.12579250002069875
}
}
},
"UnityEnvironment.step": {
"total": 10099.992014899854,
"count": 721772,
"is_parallel": true,
"self": 518.7492209000047,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 500.0139680008525,
"count": 721772,
"is_parallel": true,
"self": 500.0139680008525
},
"communicator.exchange": {
"total": 7328.479141999926,
"count": 721772,
"is_parallel": true,
"self": 7328.479141999926
},
"steps_from_proto": {
"total": 1752.7496839990706,
"count": 1443544,
"is_parallel": true,
"self": 364.37890759439665,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1388.370776404674,
"count": 5774176,
"is_parallel": true,
"self": 1388.370776404674
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8029.432921499883,
"count": 721772,
"self": 150.58379579841676,
"children": {
"process_trajectory": {
"total": 3185.799990801456,
"count": 721772,
"self": 3180.7412104014497,
"children": {
"RLTrainer._checkpoint": {
"total": 5.058780400006071,
"count": 21,
"self": 5.058780400006071
}
}
},
"_update_policy": {
"total": 4693.049134900011,
"count": 510,
"self": 3000.620772200081,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1692.4283626999302,
"count": 15306,
"self": 1692.4283626999302
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3999997463542968e-06,
"count": 1,
"self": 1.3999997463542968e-06
},
"TrainerController._save_models": {
"total": 0.27004029999807244,
"count": 1,
"self": 0.04116039999644272,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22887990000162972,
"count": 1,
"self": 0.22887990000162972
}
}
}
}
}
}
}