poca-SoccerTwos / run_logs /timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.3003082275390625,
"min": 2.3003082275390625,
"max": 3.2957077026367188,
"count": 405
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 40190.984375,
"min": 16817.07421875,
"max": 113592.3125,
"count": 405
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 69.55882352941177,
"min": 53.10752688172043,
"max": 999.0,
"count": 405
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 18920.0,
"min": 13400.0,
"max": 27060.0,
"count": 405
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1414.063386316252,
"min": 1182.052676132077,
"max": 1418.8052697466615,
"count": 357
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 192312.62053901027,
"min": 2386.8420001079744,
"max": 260313.92000269232,
"count": 357
},
"SoccerTwos.Step.mean": {
"value": 4049919.0,
"min": 9546.0,
"max": 4049919.0,
"count": 405
},
"SoccerTwos.Step.sum": {
"value": 4049919.0,
"min": 9546.0,
"max": 4049919.0,
"count": 405
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.040839776396751404,
"min": -0.07319578528404236,
"max": 0.15706507861614227,
"count": 405
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 5.5950493812561035,
"min": -7.2858405113220215,
"max": 26.086280822753906,
"count": 405
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.039607834070920944,
"min": -0.07316450774669647,
"max": 0.15821823477745056,
"count": 405
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 5.426273345947266,
"min": -7.217824459075928,
"max": 27.371755599975586,
"count": 405
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 405
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 405
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.14294890287148693,
"min": -0.6,
"max": 0.47979999686542313,
"count": 405
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 19.583999693393707,
"min": -29.241599678993225,
"max": 42.44939982891083,
"count": 405
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.14294890287148693,
"min": -0.6,
"max": 0.47979999686542313,
"count": 405
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 19.583999693393707,
"min": -29.241599678993225,
"max": 42.44939982891083,
"count": 405
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 405
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 405
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016164032027396993,
"min": 0.0106964270855921,
"max": 0.0229114227565636,
"count": 191
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016164032027396993,
"min": 0.0106964270855921,
"max": 0.0229114227565636,
"count": 191
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09139165009061495,
"min": 3.561488758426397e-07,
"max": 0.09471063191692035,
"count": 191
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09139165009061495,
"min": 3.561488758426397e-07,
"max": 0.09471063191692035,
"count": 191
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09296527057886124,
"min": 6.305631112960933e-07,
"max": 0.09628343259294828,
"count": 191
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09296527057886124,
"min": 6.305631112960933e-07,
"max": 0.09628343259294828,
"count": 191
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 191
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 191
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 191
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 191
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 191
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 191
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1708012292",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\Dhananjay\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1708025028"
},
"total": 12735.595193699999,
"count": 1,
"self": 1.462846799997351,
"children": {
"run_training.setup": {
"total": 0.09709810000003927,
"count": 1,
"self": 0.09709810000003927
},
"TrainerController.start_learning": {
"total": 12734.0352488,
"count": 1,
"self": 5.861870699991414,
"children": {
"TrainerController._reset_env": {
"total": 16.84424420000107,
"count": 21,
"self": 16.84424420000107
},
"TrainerController.advance": {
"total": 12711.217856500009,
"count": 267864,
"self": 5.466696299608884,
"children": {
"env_step": {
"total": 4134.088494500329,
"count": 267864,
"self": 3163.801560300176,
"children": {
"SubprocessEnvManager._take_step": {
"total": 966.4848599002754,
"count": 267864,
"self": 31.90135360082047,
"children": {
"TorchPolicy.evaluate": {
"total": 934.5835062994549,
"count": 517690,
"self": 934.5835062994549
}
}
},
"workers": {
"total": 3.802074299877745,
"count": 267864,
"self": 0.0,
"children": {
"worker_root": {
"total": 12686.005680700559,
"count": 267864,
"is_parallel": true,
"self": 10258.134558000176,
"children": {
"steps_from_proto": {
"total": 0.0368874000004098,
"count": 42,
"is_parallel": true,
"self": 0.007261700012236361,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.029625699988173437,
"count": 168,
"is_parallel": true,
"self": 0.029625699988173437
}
}
},
"UnityEnvironment.step": {
"total": 2427.8342353003814,
"count": 267864,
"is_parallel": true,
"self": 143.70302330030836,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 140.94436670031018,
"count": 267864,
"is_parallel": true,
"self": 140.94436670031018
},
"communicator.exchange": {
"total": 1696.7340831000442,
"count": 267864,
"is_parallel": true,
"self": 1696.7340831000442
},
"steps_from_proto": {
"total": 446.45276219971834,
"count": 535728,
"is_parallel": true,
"self": 91.21260090003125,
"children": {
"_process_rank_one_or_two_observation": {
"total": 355.2401612996871,
"count": 2142912,
"is_parallel": true,
"self": 355.2401612996871
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8571.66266570007,
"count": 267864,
"self": 48.436712600381725,
"children": {
"process_trajectory": {
"total": 1168.526731999696,
"count": 267864,
"self": 1167.4441852996952,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0825467000008757,
"count": 8,
"self": 1.0825467000008757
}
}
},
"_update_policy": {
"total": 7354.699221099992,
"count": 192,
"self": 550.1508125999871,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6804.548408500005,
"count": 5750,
"self": 6804.548408500005
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4999986888142303e-06,
"count": 1,
"self": 1.4999986888142303e-06
},
"TrainerController._save_models": {
"total": 0.11127590000069176,
"count": 1,
"self": 0.007658700000320096,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10361720000037167,
"count": 1,
"self": 0.10361720000037167
}
}
}
}
}
}
}
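
A minimal sketch (not part of ML-Agents itself) of how one might load and summarize this file with Python's standard json module. The field names used below (gauges, value, min, max, count, total, self, children) come from the data above; the relative path run_logs/timers.json is assumed from the repository layout shown in the title and is hypothetical for any other setup.

    import json

    # Assumed location of this file; adjust to wherever the run logs were written.
    with open("run_logs/timers.json") as f:
        root = json.load(f)

    # Each gauge records the most recent value plus its min, max, and update count.
    for name, gauge in sorted(root["gauges"].items()):
        print(f"{name}: value={gauge['value']:.4g} "
              f"min={gauge['min']:.4g} max={gauge['max']:.4g} "
              f"count={gauge['count']}")

    # Timer nodes carry total seconds, call count, self time, and nested children,
    # so the profiling data can be printed as an indented tree.
    def walk(node, label="root", depth=0):
        total = node.get("total", 0.0)
        count = node.get("count", 0)
        print(f"{'  ' * depth}{label}: {total:.1f}s over {count} call(s)")
        for child_name, child in node.get("children", {}).items():
            walk(child, child_name, depth + 1)

    walk(root)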