{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.775998592376709,
"min": 1.6040146350860596,
"max": 3.2957441806793213,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 35747.30078125,
"min": 5111.42138671875,
"max": 138394.046875,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 82.56666666666666,
"min": 55.28888888888889,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19816.0,
"min": 15564.0,
"max": 24952.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1573.510181424287,
"min": 1176.3367770620202,
"max": 1643.2444482422695,
"count": 2891
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 188821.22177091445,
"min": 2354.063791122488,
"max": 291336.48131911457,
"count": 2891
},
"SoccerTwos.Step.mean": {
"value": 49999865.0,
"min": 9498.0,
"max": 49999865.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999865.0,
"min": 9498.0,
"max": 49999865.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.06021175906062126,
"min": -0.29010486602783203,
"max": 0.12045252323150635,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -7.2254109382629395,
"min": -18.919986724853516,
"max": 9.369237899780273,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.05843336135149002,
"min": -0.29126614332199097,
"max": 0.12659993767738342,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -7.012003421783447,
"min": -19.16312026977539,
"max": 9.595020294189453,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.06249999751647314,
"min": -0.8743764705517713,
"max": 0.6009942889213562,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -7.499999701976776,
"min": -55.73639988899231,
"max": 43.6576002240181,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.06249999751647314,
"min": -0.8743764705517713,
"max": 0.6009942889213562,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -7.499999701976776,
"min": -55.73639988899231,
"max": 43.6576002240181,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016774558438919486,
"min": 0.010072278234777817,
"max": 0.025630577544992168,
"count": 2341
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016774558438919486,
"min": 0.010072278234777817,
"max": 0.025630577544992168,
"count": 2341
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09155314341187477,
"min": 3.240071249493992e-11,
"max": 0.09859520792961121,
"count": 2341
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09155314341187477,
"min": 3.240071249493992e-11,
"max": 0.09859520792961121,
"count": 2341
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09291486019889514,
"min": 4.150054478309813e-11,
"max": 0.10025302891929945,
"count": 2341
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09291486019889514,
"min": 4.150054478309813e-11,
"max": 0.10025302891929945,
"count": 2341
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2341
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2341
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2341
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2341
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2341
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2341
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703464603",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\chang\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1703627288"
},
"total": 162713.8910461,
"count": 1,
"self": 2.877047700021649,
"children": {
"run_training.setup": {
"total": 0.10487579999971786,
"count": 1,
"self": 0.10487579999971786
},
"TrainerController.start_learning": {
"total": 162710.9091226,
"count": 1,
"self": 95.03098711458733,
"children": {
"TrainerController._reset_env": {
"total": 22.870975599962094,
"count": 250,
"self": 22.870975599962094
},
"TrainerController.advance": {
"total": 162592.78000478543,
"count": 3285140,
"self": 99.11501299220254,
"children": {
"env_step": {
"total": 66458.05725349297,
"count": 3285140,
"self": 49901.25500668623,
"children": {
"SubprocessEnvManager._take_step": {
"total": 16496.2410986158,
"count": 3285140,
"self": 579.729718210041,
"children": {
"TorchPolicy.evaluate": {
"total": 15916.511380405758,
"count": 6413848,
"self": 15916.511380405758
}
}
},
"workers": {
"total": 60.56114819093136,
"count": 3285140,
"self": 0.0,
"children": {
"worker_root": {
"total": 162593.12074150195,
"count": 3285140,
"is_parallel": true,
"self": 124623.27507769279,
"children": {
"steps_from_proto": {
"total": 0.5632270000055541,
"count": 500,
"is_parallel": true,
"self": 0.10653560005812324,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.4566913999474309,
"count": 2000,
"is_parallel": true,
"self": 0.4566913999474309
}
}
},
"UnityEnvironment.step": {
"total": 37969.28243680915,
"count": 3285140,
"is_parallel": true,
"self": 2018.8212593716598,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2098.5394663058187,
"count": 3285140,
"is_parallel": true,
"self": 2098.5394663058187
},
"communicator.exchange": {
"total": 26754.034175819048,
"count": 3285140,
"is_parallel": true,
"self": 26754.034175819048
},
"steps_from_proto": {
"total": 7097.887535312628,
"count": 6570280,
"is_parallel": true,
"self": 1349.507690489363,
"children": {
"_process_rank_one_or_two_observation": {
"total": 5748.379844823265,
"count": 26281120,
"is_parallel": true,
"self": 5748.379844823265
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 96035.60773830024,
"count": 3285140,
"self": 705.0338465059904,
"children": {
"process_trajectory": {
"total": 13425.446054794105,
"count": 3285140,
"self": 13407.045061593923,
"children": {
"RLTrainer._checkpoint": {
"total": 18.40099320018271,
"count": 100,
"self": 18.40099320018271
}
}
},
"_update_policy": {
"total": 81905.12783700015,
"count": 2341,
"self": 8293.239140300793,
"children": {
"TorchPOCAOptimizer.update": {
"total": 73611.88869669936,
"count": 70230,
"self": 73611.88869669936
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.00006853044033e-07,
"count": 1,
"self": 9.00006853044033e-07
},
"TrainerController._save_models": {
"total": 0.227154199994402,
"count": 1,
"self": 0.05800160000217147,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16915259999223053,
"count": 1,
"self": 0.16915259999223053
}
}
}
}
}
}
}