{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4938695430755615,
"min": 1.3796327114105225,
"max": 1.5141361951828003,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 28825.70703125,
"min": 24890.60546875,
"max": 35125.7734375,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 68.8,
"min": 46.02857142857143,
"max": 90.12727272727273,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19264.0,
"min": 18284.0,
"max": 21196.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1704.0071478243858,
"min": 1655.3515332922498,
"max": 1741.5080761339946,
"count": 500
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 238561.00069541403,
"min": 187472.29564841586,
"max": 367380.9277837126,
"count": 500
},
"SoccerTwos.Step.mean": {
"value": 24999987.0,
"min": 20009984.0,
"max": 24999987.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 24999987.0,
"min": 20009984.0,
"max": 24999987.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.04771682247519493,
"min": -0.11392596364021301,
"max": 0.07802440971136093,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -6.728072166442871,
"min": -19.840782165527344,
"max": 13.030076026916504,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.05018610134720802,
"min": -0.11536986380815506,
"max": 0.07821722328662872,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -7.076240539550781,
"min": -19.594406127929688,
"max": 13.062276840209961,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.07070070970142987,
"min": -0.47185195034200494,
"max": 0.3418256380619147,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -9.968800067901611,
"min": -72.66520035266876,
"max": 53.32479953765869,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.07070070970142987,
"min": -0.47185195034200494,
"max": 0.3418256380619147,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -9.968800067901611,
"min": -72.66520035266876,
"max": 53.32479953765869,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015609942570639154,
"min": 0.011553063154375801,
"max": 0.02369201917220683,
"count": 242
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015609942570639154,
"min": 0.011553063154375801,
"max": 0.02369201917220683,
"count": 242
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09380630056063334,
"min": 0.08720070496201515,
"max": 0.11908681665857633,
"count": 242
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09380630056063334,
"min": 0.08720070496201515,
"max": 0.11908681665857633,
"count": 242
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09497580602765084,
"min": 0.08831912775834401,
"max": 0.12188777426878611,
"count": 242
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09497580602765084,
"min": 0.08831912775834401,
"max": 0.12188777426878611,
"count": 242
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 242
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 242
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 242
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 242
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 242
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 242
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676065575",
"python_version": "3.9.16 (main, Feb 6 2023, 20:06:40) \n[GCC 9.3.0]",
"command_line_arguments": "/usr/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1676087104"
},
"total": 21528.747732700023,
"count": 1,
"self": 0.7254941999563016,
"children": {
"run_training.setup": {
"total": 0.026981900038663298,
"count": 1,
"self": 0.026981900038663298
},
"TrainerController.start_learning": {
"total": 21527.99525660003,
"count": 1,
"self": 12.144751194049604,
"children": {
"TrainerController._reset_env": {
"total": 1.524433900078293,
"count": 26,
"self": 1.524433900078293
},
"TrainerController.advance": {
"total": 21514.10447030596,
"count": 345891,
"self": 13.490233229356818,
"children": {
"env_step": {
"total": 11501.870636371139,
"count": 345891,
"self": 9949.838601987227,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1544.709216009418,
"count": 345891,
"self": 67.26746340212412,
"children": {
"TorchPolicy.evaluate": {
"total": 1477.441752607294,
"count": 627882,
"self": 1477.441752607294
}
}
},
"workers": {
"total": 7.322818374494091,
"count": 345891,
"self": 0.0,
"children": {
"worker_root": {
"total": 21500.550929590652,
"count": 345891,
"is_parallel": true,
"self": 12809.171533371555,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0032686000340618193,
"count": 2,
"is_parallel": true,
"self": 0.0008765000384300947,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023920999956317246,
"count": 8,
"is_parallel": true,
"self": 0.0023920999956317246
}
}
},
"UnityEnvironment.step": {
"total": 0.03171860001748428,
"count": 1,
"is_parallel": true,
"self": 0.0005291999550536275,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004539000219665468,
"count": 1,
"is_parallel": true,
"self": 0.0004539000219665468
},
"communicator.exchange": {
"total": 0.029097900027409196,
"count": 1,
"is_parallel": true,
"self": 0.029097900027409196
},
"steps_from_proto": {
"total": 0.0016376000130549073,
"count": 2,
"is_parallel": true,
"self": 0.00034689996391534805,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012907000491395593,
"count": 8,
"is_parallel": true,
"self": 0.0012907000491395593
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.06265069980872795,
"count": 50,
"is_parallel": true,
"self": 0.013310700247529894,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.049339999561198056,
"count": 200,
"is_parallel": true,
"self": 0.049339999561198056
}
}
},
"UnityEnvironment.step": {
"total": 8691.316745519289,
"count": 345890,
"is_parallel": true,
"self": 283.9603822171339,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 218.75333628768567,
"count": 345890,
"is_parallel": true,
"self": 218.75333628768567
},
"communicator.exchange": {
"total": 7329.674721300369,
"count": 345890,
"is_parallel": true,
"self": 7329.674721300369
},
"steps_from_proto": {
"total": 858.9283057141001,
"count": 691780,
"is_parallel": true,
"self": 187.93740530876676,
"children": {
"_process_rank_one_or_two_observation": {
"total": 670.9909004053334,
"count": 2767120,
"is_parallel": true,
"self": 670.9909004053334
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 9998.743600705464,
"count": 345891,
"self": 69.79396640125196,
"children": {
"process_trajectory": {
"total": 1742.1020065043122,
"count": 345891,
"self": 1739.5998391043977,
"children": {
"RLTrainer._checkpoint": {
"total": 2.5021673999144696,
"count": 10,
"self": 2.5021673999144696
}
}
},
"_update_policy": {
"total": 8186.8476277999,
"count": 242,
"self": 1012.9301666970132,
"children": {
"TorchPOCAOptimizer.update": {
"total": 7173.917461102887,
"count": 7260,
"self": 7173.917461102887
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.6999623514711857e-06,
"count": 1,
"self": 2.6999623514711857e-06
},
"TrainerController._save_models": {
"total": 0.22159849997842684,
"count": 1,
"self": 0.0020625999895855784,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21953589998884127,
"count": 1,
"self": 0.21953589998884127
}
}
}
}
}
}
}
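
Note: the JSON above appears to be the timer/gauge summary that ML-Agents writes at the end of a run (typically run_logs/timers.json). As a minimal sketch, not part of the original repository, the Python below shows one way to load this file and print each metric recorded under "gauges" with its latest value, min, max, and count. The file path "timers.json" is an assumption; adjust it to wherever the file is stored.

import json

# Minimal sketch: load the gauge summary above and print one line per metric.
# Each gauge entry carries "value" (latest), "min", "max", and "count".
with open("timers.json") as f:
    stats = json.load(f)

for name, gauge in stats["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"min={gauge['min']:.4f} max={gauge['max']:.4f} "
          f"count={gauge['count']}")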