{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.537009358406067,
"min": 1.4774773120880127,
"max": 3.2406399250030518,
"count": 1055
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34379.82421875,
"min": 9119.171875,
"max": 144052.25,
"count": 1055
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 77.23076923076923,
"min": 66.24657534246575,
"max": 999.0,
"count": 1055
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20080.0,
"min": 7216.0,
"max": 33680.0,
"count": 1055
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1489.9507179011287,
"min": 1198.8526709796429,
"max": 1540.1731965299307,
"count": 1043
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 193693.59332714672,
"min": 2398.865750760123,
"max": 220121.50307454198,
"count": 1043
},
"SoccerTwos.Step.mean": {
"value": 10819900.0,
"min": 279812.0,
"max": 10819900.0,
"count": 1055
},
"SoccerTwos.Step.sum": {
"value": 10819900.0,
"min": 279812.0,
"max": 10819900.0,
"count": 1055
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.007840106263756752,
"min": -0.12258293479681015,
"max": 0.12172777950763702,
"count": 1055
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.01137375831604,
"min": -13.802045822143555,
"max": 11.253630638122559,
"count": 1055
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.007767575792968273,
"min": -0.12772130966186523,
"max": 0.12219181656837463,
"count": 1055
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.0020172595977783,
"min": -13.958074569702148,
"max": 11.576162338256836,
"count": 1055
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1055
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1055
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.020139538040456845,
"min": -0.7202909101139415,
"max": 0.4125411790959975,
"count": 1055
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.598000407218933,
"min": -41.9579998254776,
"max": 35.55159991979599,
"count": 1055
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.020139538040456845,
"min": -0.7202909101139415,
"max": 0.4125411790959975,
"count": 1055
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.598000407218933,
"min": -41.9579998254776,
"max": 35.55159991979599,
"count": 1055
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1055
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1055
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.038426535919037025,
"min": 0.012255911685861065,
"max": 0.038426535919037025,
"count": 105
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.038426535919037025,
"min": 0.012255911685861065,
"max": 0.038426535919037025,
"count": 105
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.03296540471104284,
"min": 0.0009833336504622518,
"max": 0.049238433285305895,
"count": 105
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.03296540471104284,
"min": 0.0009833336504622518,
"max": 0.049238433285305895,
"count": 105
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.044728918373584746,
"min": 0.000999350169998555,
"max": 0.05651829618339737,
"count": 105
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.044728918373584746,
"min": 0.000999350169998555,
"max": 0.05651829618339737,
"count": 105
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 105
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 105
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000004,
"min": 0.20000000000000004,
"max": 0.20000000000000004,
"count": 105
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000004,
"min": 0.20000000000000004,
"max": 0.20000000000000004,
"count": 105
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.002000000000000001,
"min": 0.002000000000000001,
"max": 0.002000000000000001,
"count": 105
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.002000000000000001,
"min": 0.002000000000000001,
"max": 0.002000000000000001,
"count": 105
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703589289",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\Pongs\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume --torch-device cuda",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2",
"numpy_version": "1.26.2",
"end_time_seconds": "1703644542"
},
"total": 55255.214438800016,
"count": 1,
"self": 10.107869200001005,
"children": {
"run_training.setup": {
"total": 0.20848850000766106,
"count": 1,
"self": 0.20848850000766106
},
"TrainerController.start_learning": {
"total": 55244.89808110001,
"count": 1,
"self": 28.544168001550133,
"children": {
"TrainerController._reset_env": {
"total": 21.239693500072462,
"count": 107,
"self": 21.239693500072462
},
"TrainerController.advance": {
"total": 55194.01206489839,
"count": 708879,
"self": 33.24450540417456,
"children": {
"env_step": {
"total": 31680.732621098447,
"count": 708879,
"self": 17528.450730892466,
"children": {
"SubprocessEnvManager._take_step": {
"total": 14133.576586199284,
"count": 708879,
"self": 230.8581934912363,
"children": {
"TorchPolicy.evaluate": {
"total": 13902.718392708048,
"count": 1348968,
"self": 13902.718392708048
}
}
},
"workers": {
"total": 18.70530400669668,
"count": 708879,
"self": 0.0,
"children": {
"worker_root": {
"total": 55193.046218404095,
"count": 708879,
"is_parallel": true,
"self": 41511.166203701985,
"children": {
"steps_from_proto": {
"total": 0.48548469970410224,
"count": 214,
"is_parallel": true,
"self": 0.10035940013767686,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.3851252995664254,
"count": 856,
"is_parallel": true,
"self": 0.3851252995664254
}
}
},
"UnityEnvironment.step": {
"total": 13681.394530002406,
"count": 708879,
"is_parallel": true,
"self": 755.9751880668191,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 631.3339543965994,
"count": 708879,
"is_parallel": true,
"self": 631.3339543965994
},
"communicator.exchange": {
"total": 9695.640459309463,
"count": 708879,
"is_parallel": true,
"self": 9695.640459309463
},
"steps_from_proto": {
"total": 2598.4449282295245,
"count": 1417758,
"is_parallel": true,
"self": 518.821084318959,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2079.6238439105655,
"count": 5671032,
"is_parallel": true,
"self": 2079.6238439105655
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 23480.034938395765,
"count": 708878,
"self": 196.98457979380328,
"children": {
"process_trajectory": {
"total": 6295.509820201958,
"count": 708878,
"self": 6265.804239801975,
"children": {
"RLTrainer._checkpoint": {
"total": 29.705580399982864,
"count": 21,
"self": 29.705580399982864
}
}
},
"_update_policy": {
"total": 16987.540538400004,
"count": 105,
"self": 5798.289395300788,
"children": {
"TorchPOCAOptimizer.update": {
"total": 11189.251143099216,
"count": 12600,
"self": 11189.251143099216
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.3999891709536314e-06,
"count": 1,
"self": 2.3999891709536314e-06
},
"TrainerController._save_models": {
"total": 1.1021523000090383,
"count": 1,
"self": 0.1637812999833841,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9383710000256542,
"count": 1,
"self": 0.9383710000256542
}
}
}
}
}
}
}