{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.2273454666137695,
"min": 2.2175986766815186,
"max": 3.227695941925049,
"count": 517
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 43335.234375,
"min": 10667.50390625,
"max": 119745.4765625,
"count": 517
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 53.561797752808985,
"min": 40.925619834710744,
"max": 999.0,
"count": 517
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19068.0,
"min": 1056.0,
"max": 31968.0,
"count": 517
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1504.1926838586373,
"min": 1186.0004903836195,
"max": 1512.4895715871382,
"count": 464
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 267746.29772683745,
"min": 2372.000980767239,
"max": 351916.06924432574,
"count": 464
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 517
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 517
},
"SoccerTwos.Step.mean": {
"value": 5659954.0,
"min": 509744.0,
"max": 5659954.0,
"count": 516
},
"SoccerTwos.Step.sum": {
"value": 5659954.0,
"min": 509744.0,
"max": 5659954.0,
"count": 516
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.08629171550273895,
"min": -0.11334313452243805,
"max": 0.23142051696777344,
"count": 516
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 15.359925270080566,
"min": -20.5283203125,
"max": 44.441707611083984,
"count": 516
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.07696717232465744,
"min": -0.1191025823354721,
"max": 0.228508859872818,
"count": 516
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 13.700156211853027,
"min": -19.5948429107666,
"max": 44.568458557128906,
"count": 516
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 516
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 516
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.11198651522732853,
"min": -0.7142857142857143,
"max": 0.46606250293552876,
"count": 516
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 19.933599710464478,
"min": -44.531199753284454,
"max": 97.17799997329712,
"count": 516
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.11198651522732853,
"min": -0.7142857142857143,
"max": 0.46606250293552876,
"count": 516
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 19.933599710464478,
"min": -44.531199753284454,
"max": 97.17799997329712,
"count": 516
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01888621726927037,
"min": 0.010425080761585074,
"max": 0.02376857806618015,
"count": 244
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01888621726927037,
"min": 0.010425080761585074,
"max": 0.02376857806618015,
"count": 244
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10987885296344757,
"min": 1.714208913957312e-06,
"max": 0.10987885296344757,
"count": 244
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10987885296344757,
"min": 1.714208913957312e-06,
"max": 0.10987885296344757,
"count": 244
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11204245090484619,
"min": 1.4393047194971586e-06,
"max": 0.11204245090484619,
"count": 244
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11204245090484619,
"min": 1.4393047194971586e-06,
"max": 0.11204245090484619,
"count": 244
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 244
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 244
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 244
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 244
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 244
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 244
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689386118",
"python_version": "3.9.17 (main, Jul 5 2023, 20:47:11) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\hsujs\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1689405283"
},
"total": 19164.951590700002,
"count": 1,
"self": 0.2140637999982573,
"children": {
"run_training.setup": {
"total": 0.12235099999999965,
"count": 1,
"self": 0.12235099999999965
},
"TrainerController.start_learning": {
"total": 19164.615175900002,
"count": 1,
"self": 9.635304699608241,
"children": {
"TrainerController._reset_env": {
"total": 6.171220099996478,
"count": 27,
"self": 6.171220099996478
},
"TrainerController.advance": {
"total": 19148.602981200394,
"count": 342530,
"self": 9.946182800293172,
"children": {
"env_step": {
"total": 7370.093500300388,
"count": 342530,
"self": 5722.4948751995435,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1641.3676411007264,
"count": 342530,
"self": 56.48914060115112,
"children": {
"TorchPolicy.evaluate": {
"total": 1584.8785004995752,
"count": 659218,
"self": 1584.8785004995752
}
}
},
"workers": {
"total": 6.230984000118375,
"count": 342529,
"self": 0.0,
"children": {
"worker_root": {
"total": 19146.92048600019,
"count": 342529,
"is_parallel": true,
"self": 14552.490896399906,
"children": {
"steps_from_proto": {
"total": 0.053192699994924375,
"count": 54,
"is_parallel": true,
"self": 0.011896599984961398,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04129610000996298,
"count": 216,
"is_parallel": true,
"self": 0.04129610000996298
}
}
},
"UnityEnvironment.step": {
"total": 4594.376396900288,
"count": 342529,
"is_parallel": true,
"self": 222.08454690051894,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 198.59745240033703,
"count": 342529,
"is_parallel": true,
"self": 198.59745240033703
},
"communicator.exchange": {
"total": 3446.9493110000635,
"count": 342529,
"is_parallel": true,
"self": 3446.9493110000635
},
"steps_from_proto": {
"total": 726.7450865993674,
"count": 685058,
"is_parallel": true,
"self": 154.0908538017153,
"children": {
"_process_rank_one_or_two_observation": {
"total": 572.6542327976521,
"count": 2740232,
"is_parallel": true,
"self": 572.6542327976521
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 11768.563298099714,
"count": 342529,
"self": 65.42690689986557,
"children": {
"process_trajectory": {
"total": 1471.5374333998707,
"count": 342529,
"self": 1469.5611095998727,
"children": {
"RLTrainer._checkpoint": {
"total": 1.97632379999801,
"count": 11,
"self": 1.97632379999801
}
}
},
"_update_policy": {
"total": 10231.598957799979,
"count": 244,
"self": 977.9501888999621,
"children": {
"TorchPOCAOptimizer.update": {
"total": 9253.648768900017,
"count": 7332,
"self": 9253.648768900017
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.7000020307023078e-06,
"count": 1,
"self": 1.7000020307023078e-06
},
"TrainerController._save_models": {
"total": 0.20566820000021835,
"count": 1,
"self": 0.025601700002880534,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18006649999733781,
"count": 1,
"self": 0.18006649999733781
}
}
}
}
}
}
}