{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.847357988357544,
"min": 1.8039144277572632,
"max": 3.2957615852355957,
"count": 643
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 36533.3515625,
"min": 13936.708984375,
"max": 129445.9765625,
"count": 643
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 62.620253164556964,
"min": 41.279661016949156,
"max": 999.0,
"count": 643
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19788.0,
"min": 12412.0,
"max": 29196.0,
"count": 643
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1594.615008064832,
"min": 1171.09056039849,
"max": 1611.076103789316,
"count": 587
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 251949.17127424345,
"min": 2342.18112079698,
"max": 371587.3148275218,
"count": 587
},
"SoccerTwos.Step.mean": {
"value": 6429966.0,
"min": 9614.0,
"max": 6429966.0,
"count": 643
},
"SoccerTwos.Step.sum": {
"value": 6429966.0,
"min": 9614.0,
"max": 6429966.0,
"count": 643
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.029824908822774887,
"min": -0.08514673262834549,
"max": 0.1940293163061142,
"count": 643
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -4.712335586547852,
"min": -16.979042053222656,
"max": 28.993396759033203,
"count": 643
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.02964496612548828,
"min": -0.08863995969295502,
"max": 0.19637098908424377,
"count": 643
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.683904647827148,
"min": -17.955656051635742,
"max": 29.506513595581055,
"count": 643
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 643
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 643
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.21634936709947225,
"min": -0.6153846153846154,
"max": 0.4518156881425895,
"count": 643
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -34.183200001716614,
"min": -47.33100003004074,
"max": 72.88959991931915,
"count": 643
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.21634936709947225,
"min": -0.6153846153846154,
"max": 0.4518156881425895,
"count": 643
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -34.183200001716614,
"min": -47.33100003004074,
"max": 72.88959991931915,
"count": 643
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 643
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 643
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.0180307745715254,
"min": 0.010349092696560546,
"max": 0.021931897252216004,
"count": 307
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.0180307745715254,
"min": 0.010349092696560546,
"max": 0.021931897252216004,
"count": 307
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11718054562807083,
"min": 2.3144507821371008e-07,
"max": 0.12711331248283386,
"count": 307
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11718054562807083,
"min": 2.3144507821371008e-07,
"max": 0.12711331248283386,
"count": 307
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.1189131701985995,
"min": 2.6177915941616446e-07,
"max": 0.12865767553448676,
"count": 307
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.1189131701985995,
"min": 2.6177915941616446e-07,
"max": 0.12865767553448676,
"count": 307
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 307
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 307
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 307
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 307
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 307
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 307
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696445858",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\vivin\\Anaconda3\\envs\\hfrl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1696530297"
},
"total": 84443.6438798,
"count": 1,
"self": 2.986561099998653,
"children": {
"run_training.setup": {
"total": 0.1693769999999999,
"count": 1,
"self": 0.1693769999999999
},
"TrainerController.start_learning": {
"total": 84440.4879417,
"count": 1,
"self": 15.064017405602499,
"children": {
"TrainerController._reset_env": {
"total": 5.9058642999637625,
"count": 33,
"self": 5.9058642999637625
},
"TrainerController.advance": {
"total": 84419.28877399444,
"count": 434518,
"self": 15.178462592259166,
"children": {
"env_step": {
"total": 10781.812579299307,
"count": 434518,
"self": 7998.074253597756,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2774.2928609035,
"count": 434518,
"self": 93.24658060185993,
"children": {
"TorchPolicy.evaluate": {
"total": 2681.04628030164,
"count": 816594,
"self": 2681.04628030164
}
}
},
"workers": {
"total": 9.445464798050937,
"count": 434518,
"self": 0.0,
"children": {
"worker_root": {
"total": 84415.55031200343,
"count": 434518,
"is_parallel": true,
"self": 78125.50294890224,
"children": {
"steps_from_proto": {
"total": 0.1131358000279068,
"count": 66,
"is_parallel": true,
"self": 0.021666400003675257,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.09146940002423154,
"count": 264,
"is_parallel": true,
"self": 0.09146940002423154
}
}
},
"UnityEnvironment.step": {
"total": 6289.934227301162,
"count": 434518,
"is_parallel": true,
"self": 392.26629980469625,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 317.1448525998336,
"count": 434518,
"is_parallel": true,
"self": 317.1448525998336
},
"communicator.exchange": {
"total": 4357.267293398496,
"count": 434518,
"is_parallel": true,
"self": 4357.267293398496
},
"steps_from_proto": {
"total": 1223.2557814981355,
"count": 869036,
"is_parallel": true,
"self": 241.211575695977,
"children": {
"_process_rank_one_or_two_observation": {
"total": 982.0442058021586,
"count": 3476144,
"is_parallel": true,
"self": 982.0442058021586
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 73622.29773210287,
"count": 434518,
"self": 103.21380430168938,
"children": {
"process_trajectory": {
"total": 2728.664671401102,
"count": 434518,
"self": 2725.696154101119,
"children": {
"RLTrainer._checkpoint": {
"total": 2.9685172999829774,
"count": 12,
"self": 2.9685172999829774
}
}
},
"_update_policy": {
"total": 70790.41925640008,
"count": 307,
"self": 12139.413613700468,
"children": {
"TorchPOCAOptimizer.update": {
"total": 58651.005642699616,
"count": 9213,
"self": 58651.005642699616
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.0000006770715117e-06,
"count": 1,
"self": 2.0000006770715117e-06
},
"TrainerController._save_models": {
"total": 0.22928400000091642,
"count": 1,
"self": 0.010616599989589304,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21866740001132712,
"count": 1,
"self": 0.21866740001132712
}
}
}
}
}
}
}