{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.0826170444488525,
"min": 3.0529732704162598,
"max": 3.1534907817840576,
"count": 75
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 88680.7265625,
"min": 17613.982421875,
"max": 100213.796875,
"count": 75
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 460.1666666666667,
"min": 216.95238095238096,
"max": 776.0,
"count": 75
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 22088.0,
"min": 14780.0,
"max": 24932.0,
"count": 75
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1278.0626123633342,
"min": 1235.2334118823687,
"max": 1280.6322907660783,
"count": 75
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 25561.252247266686,
"min": 4989.136756322388,
"max": 51225.29163064313,
"count": 75
},
"SoccerTwos.Step.mean": {
"value": 3499730.0,
"min": 2759046.0,
"max": 3499730.0,
"count": 75
},
"SoccerTwos.Step.sum": {
"value": 3499730.0,
"min": 2759046.0,
"max": 3499730.0,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.0163666233420372,
"min": -0.008200411684811115,
"max": 0.04534551501274109,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.3927989602088928,
"min": -0.22961151599884033,
"max": 1.8138206005096436,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.009769868105649948,
"min": -0.008772426284849644,
"max": 0.061527688056230545,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.23447683453559875,
"min": -0.18506309390068054,
"max": 2.1969051361083984,
"count": 75
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 75
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.24455000211795172,
"min": -0.5335870942761821,
"max": 0.44424000233411787,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -5.869200050830841,
"min": -16.541199922561646,
"max": 17.769600093364716,
"count": 75
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.24455000211795172,
"min": -0.5335870942761821,
"max": 0.44424000233411787,
"count": 75
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -5.869200050830841,
"min": -16.541199922561646,
"max": 17.769600093364716,
"count": 75
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 75
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 75
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.0163678941484856,
"min": 0.013363467935899582,
"max": 0.02322041514950494,
"count": 35
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.0163678941484856,
"min": 0.013363467935899582,
"max": 0.02322041514950494,
"count": 35
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.010916901628176372,
"min": 0.0049478520484020315,
"max": 0.014205682122459014,
"count": 35
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.010916901628176372,
"min": 0.0049478520484020315,
"max": 0.014205682122459014,
"count": 35
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.011505619001885256,
"min": 0.005038972470598916,
"max": 0.014457402875026067,
"count": 35
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.011505619001885256,
"min": 0.005038972470598916,
"max": 0.014457402875026067,
"count": 35
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003999999999999999,
"min": 0.0003999999999999999,
"max": 0.0003999999999999999,
"count": 35
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003999999999999999,
"min": 0.0003999999999999999,
"max": 0.0003999999999999999,
"count": 35
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 35
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 35
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 35
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 35
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690875386",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume --torch-device=cpu",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690878607"
},
"total": 3221.02588012,
"count": 1,
"self": 0.693373149999843,
"children": {
"run_training.setup": {
"total": 0.09810845300000892,
"count": 1,
"self": 0.09810845300000892
},
"TrainerController.start_learning": {
"total": 3220.234398517,
"count": 1,
"self": 1.8983804490567309,
"children": {
"TrainerController._reset_env": {
"total": 4.4790416780002715,
"count": 5,
"self": 4.4790416780002715
},
"TrainerController.advance": {
"total": 3213.611381196943,
"count": 49107,
"self": 2.13413384487194,
"children": {
"env_step": {
"total": 1764.9911412320362,
"count": 49107,
"self": 1481.702584109977,
"children": {
"SubprocessEnvManager._take_step": {
"total": 282.04487679306067,
"count": 49107,
"self": 13.714905159100056,
"children": {
"TorchPolicy.evaluate": {
"total": 268.3299716339606,
"count": 96744,
"self": 268.3299716339606
}
}
},
"workers": {
"total": 1.2436803289986642,
"count": 49107,
"self": 0.0,
"children": {
"worker_root": {
"total": 3200.7800029239825,
"count": 49107,
"is_parallel": true,
"self": 1974.9441368359787,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.012773876999972344,
"count": 2,
"is_parallel": true,
"self": 0.008126517999983207,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004647358999989137,
"count": 8,
"is_parallel": true,
"self": 0.004647358999989137
}
}
},
"UnityEnvironment.step": {
"total": 0.061160170999983166,
"count": 1,
"is_parallel": true,
"self": 0.0014477269999702003,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0010304280000354993,
"count": 1,
"is_parallel": true,
"self": 0.0010304280000354993
},
"communicator.exchange": {
"total": 0.05436771599994472,
"count": 1,
"is_parallel": true,
"self": 0.05436771599994472
},
"steps_from_proto": {
"total": 0.004314300000032745,
"count": 2,
"is_parallel": true,
"self": 0.000776908000034382,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0035373919999983627,
"count": 8,
"is_parallel": true,
"self": 0.0035373919999983627
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.014725805999944441,
"count": 8,
"is_parallel": true,
"self": 0.002693271000907771,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.01203253499903667,
"count": 32,
"is_parallel": true,
"self": 0.01203253499903667
}
}
},
"UnityEnvironment.step": {
"total": 1225.8211402820039,
"count": 49106,
"is_parallel": true,
"self": 74.23583136709681,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 39.960131956988164,
"count": 49106,
"is_parallel": true,
"self": 39.960131956988164
},
"communicator.exchange": {
"total": 880.7466795869734,
"count": 49106,
"is_parallel": true,
"self": 880.7466795869734
},
"steps_from_proto": {
"total": 230.87849737094558,
"count": 98212,
"is_parallel": true,
"self": 41.72440882700482,
"children": {
"_process_rank_one_or_two_observation": {
"total": 189.15408854394076,
"count": 392848,
"is_parallel": true,
"self": 189.15408854394076
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1446.486106120035,
"count": 49107,
"self": 15.844304215122065,
"children": {
"process_trajectory": {
"total": 198.5306375489116,
"count": 49107,
"self": 198.03153285191172,
"children": {
"RLTrainer._checkpoint": {
"total": 0.49910469699989335,
"count": 2,
"self": 0.49910469699989335
}
}
},
"_update_policy": {
"total": 1232.1111643560014,
"count": 35,
"self": 207.44615126301028,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1024.665013092991,
"count": 1050,
"self": 1024.665013092991
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3130002116668038e-06,
"count": 1,
"self": 1.3130002116668038e-06
},
"TrainerController._save_models": {
"total": 0.24559387999988758,
"count": 1,
"self": 0.002991471999848727,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24260240800003885,
"count": 1,
"self": 0.24260240800003885
}
}
}
}
}
}
}