poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.971757173538208,
"min": 1.9702214002609253,
"max": 3.295745611190796,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 36469.62109375,
"min": 26095.61328125,
"max": 152529.375,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 54.08888888888889,
"min": 39.642276422764226,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19472.0,
"min": 16396.0,
"max": 24236.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1505.933671931044,
"min": 1198.025705444381,
"max": 1510.0504699204894,
"count": 469
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 271068.06094758795,
"min": 2400.5182950146723,
"max": 363117.00495893107,
"count": 469
},
"SoccerTwos.Step.mean": {
"value": 4999952.0,
"min": 9134.0,
"max": 4999952.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999952.0,
"min": 9134.0,
"max": 4999952.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0018862575525417924,
"min": -0.1087125912308693,
"max": 0.1725998818874359,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.33952635526657104,
"min": -26.634584426879883,
"max": 27.78858184814453,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0015606041997671127,
"min": -0.1133226677775383,
"max": 0.17355959117412567,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.2809087634086609,
"min": -27.764053344726562,
"max": 27.72662353515625,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.04498777952459124,
"min": -0.5398857146501541,
"max": 0.5801333226263523,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 8.097800314426422,
"min": -64.95680010318756,
"max": 54.01600044965744,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.04498777952459124,
"min": -0.5398857146501541,
"max": 0.5801333226263523,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 8.097800314426422,
"min": -64.95680010318756,
"max": 54.01600044965744,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01639594529212142,
"min": 0.011375034862430767,
"max": 0.024761093304065677,
"count": 239
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01639594529212142,
"min": 0.011375034862430767,
"max": 0.024761093304065677,
"count": 239
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10465365772445996,
"min": 2.314404673597892e-06,
"max": 0.11985040878256162,
"count": 239
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10465365772445996,
"min": 2.314404673597892e-06,
"max": 0.11985040878256162,
"count": 239
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10639850174387296,
"min": 2.2251761189788036e-06,
"max": 0.12240360130866369,
"count": 239
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10639850174387296,
"min": 2.2251761189788036e-06,
"max": 0.12240360130866369,
"count": 239
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 239
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 239
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 239
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 239
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 239
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 239
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688571562",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn --force ./ml-agents/config/poca/SoccerTwos.yaml --env=./ml-agents/training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu115",
"numpy_version": "1.21.2",
"end_time_seconds": "1688580354"
},
"total": 8792.130980844,
"count": 1,
"self": 0.32373556700076733,
"children": {
"run_training.setup": {
"total": 0.03959353300001567,
"count": 1,
"self": 0.03959353300001567
},
"TrainerController.start_learning": {
"total": 8791.767651744,
"count": 1,
"self": 6.655949540985603,
"children": {
"TrainerController._reset_env": {
"total": 6.885630088994958,
"count": 25,
"self": 6.885630088994958
},
"TrainerController.advance": {
"total": 8777.989917254017,
"count": 340349,
"self": 6.959609317264039,
"children": {
"env_step": {
"total": 6676.239928678801,
"count": 340349,
"self": 4907.813310475121,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1764.5325698936379,
"count": 340349,
"self": 51.249021445286644,
"children": {
"TorchPolicy.evaluate": {
"total": 1713.2835484483512,
"count": 634278,
"self": 1713.2835484483512
}
}
},
"workers": {
"total": 3.8940483100425354,
"count": 340349,
"self": 0.0,
"children": {
"worker_root": {
"total": 8779.436202104116,
"count": 340349,
"is_parallel": true,
"self": 4672.470853867153,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00294004300008055,
"count": 2,
"is_parallel": true,
"self": 0.0007945350005229557,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021455079995575943,
"count": 8,
"is_parallel": true,
"self": 0.0021455079995575943
}
}
},
"UnityEnvironment.step": {
"total": 0.031158214000015505,
"count": 1,
"is_parallel": true,
"self": 0.0006777199998850847,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005000779999591032,
"count": 1,
"is_parallel": true,
"self": 0.0005000779999591032
},
"communicator.exchange": {
"total": 0.027809690000140108,
"count": 1,
"is_parallel": true,
"self": 0.027809690000140108
},
"steps_from_proto": {
"total": 0.002170726000031209,
"count": 2,
"is_parallel": true,
"self": 0.00040949699973680254,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017612290002944064,
"count": 8,
"is_parallel": true,
"self": 0.0017612290002944064
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4106.914589042968,
"count": 340348,
"is_parallel": true,
"self": 202.76129913838122,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 125.19263645896103,
"count": 340348,
"is_parallel": true,
"self": 125.19263645896103
},
"communicator.exchange": {
"total": 3141.4161885170834,
"count": 340348,
"is_parallel": true,
"self": 3141.4161885170834
},
"steps_from_proto": {
"total": 637.544464928543,
"count": 680696,
"is_parallel": true,
"self": 119.00165214230174,
"children": {
"_process_rank_one_or_two_observation": {
"total": 518.5428127862413,
"count": 2722784,
"is_parallel": true,
"self": 518.5428127862413
}
}
}
}
},
"steps_from_proto": {
"total": 0.050759193994963425,
"count": 48,
"is_parallel": true,
"self": 0.009249178002846747,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04151001599211668,
"count": 192,
"is_parallel": true,
"self": 0.04151001599211668
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2094.7903792579523,
"count": 340349,
"self": 56.484258983399286,
"children": {
"process_trajectory": {
"total": 877.0320136615542,
"count": 340349,
"self": 874.5822189055539,
"children": {
"RLTrainer._checkpoint": {
"total": 2.4497947560003013,
"count": 10,
"self": 2.4497947560003013
}
}
},
"_update_policy": {
"total": 1161.2741066129988,
"count": 239,
"self": 813.95436433497,
"children": {
"TorchPOCAOptimizer.update": {
"total": 347.3197422780288,
"count": 7170,
"self": 347.3197422780288
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.89000909612514e-07,
"count": 1,
"self": 9.89000909612514e-07
},
"TrainerController._save_models": {
"total": 0.2361538710010791,
"count": 1,
"self": 0.0017686020019027637,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23438526899917633,
"count": 1,
"self": 0.23438526899917633
}
}
}
}
}
}
}
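
The JSON above follows the layout ML-Agents writes for timers.json: a flat "gauges" map of per-metric statistics (value, min, max, count) plus a nested wall-clock timer tree under "children". Below is a minimal Python sketch for inspecting such a file, not part of the log itself; the local path "run_logs/timers.json" and the helper name walk_timers are illustrative assumptions.

import json

# Load the timer log (path is an assumption for this sketch).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Report the final value of every gauge recorded during training.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']} "
          f"(min={gauge['min']}, max={gauge['max']}, count={gauge['count']})")

def walk_timers(node, name="root", depth=0):
    """Recursively print the nested timer tree with total seconds per block."""
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.1f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk_timers(child, child_name, depth + 1)

# Show where the ~8792 s of wall-clock time reported under "total" was spent.
walk_timers(timers)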