poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.791628360748291,
"min": 1.735303282737732,
"max": 3.2957119941711426,
"count": 879
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34857.921875,
"min": 13446.552734375,
"max": 141640.171875,
"count": 879
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 68.1917808219178,
"min": 49.61616161616162,
"max": 999.0,
"count": 879
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19912.0,
"min": 3996.0,
"max": 31224.0,
"count": 879
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1596.2409319507847,
"min": 1186.8126055332164,
"max": 1620.3184736724454,
"count": 807
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 233051.17606481456,
"min": 2373.6252110664327,
"max": 318548.889616456,
"count": 807
},
"SoccerTwos.Step.mean": {
"value": 8789938.0,
"min": 9148.0,
"max": 8789938.0,
"count": 879
},
"SoccerTwos.Step.sum": {
"value": 8789938.0,
"min": 9148.0,
"max": 8789938.0,
"count": 879
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.015257895924150944,
"min": -0.13309119641780853,
"max": 0.22954100370407104,
"count": 879
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.212394952774048,
"min": -16.13796615600586,
"max": 32.54047775268555,
"count": 879
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.015654107555747032,
"min": -0.13294994831085205,
"max": 0.23057672381401062,
"count": 879
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.269845724105835,
"min": -17.22362518310547,
"max": 32.340572357177734,
"count": 879
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 879
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 879
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.004852411253698941,
"min": -0.6153846153846154,
"max": 0.48171628491823065,
"count": 879
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.7035996317863464,
"min": -52.18440043926239,
"max": 65.81360006332397,
"count": 879
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.004852411253698941,
"min": -0.6153846153846154,
"max": 0.48171628491823065,
"count": 879
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.7035996317863464,
"min": -52.18440043926239,
"max": 65.81360006332397,
"count": 879
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 879
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 879
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016110525854552787,
"min": 0.011214561320957727,
"max": 0.02530888847153013,
"count": 419
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016110525854552787,
"min": 0.011214561320957727,
"max": 0.02530888847153013,
"count": 419
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.095163727303346,
"min": 2.7799073407663857e-07,
"max": 0.10515277435382207,
"count": 419
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.095163727303346,
"min": 2.7799073407663857e-07,
"max": 0.10515277435382207,
"count": 419
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09731189881761869,
"min": 3.049724206505289e-07,
"max": 0.10713796441753705,
"count": 419
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09731189881761869,
"min": 3.049724206505289e-07,
"max": 0.10713796441753705,
"count": 419
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 419
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 419
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 419
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 419
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 419
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 419
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710785195",
"python_version": "3.10.12 (main, Jul 5 2023, 15:02:25) [Clang 14.0.6 ]",
"command_line_arguments": "/opt/homebrew/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1",
"numpy_version": "1.23.5",
"end_time_seconds": "1710826245"
},
"total": 37501.77599775001,
"count": 1,
"self": 0.17920458301523468,
"children": {
"run_training.setup": {
"total": 0.03125733300112188,
"count": 1,
"self": 0.03125733300112188
},
"TrainerController.start_learning": {
"total": 37501.56553583399,
"count": 1,
"self": 7.007030306500383,
"children": {
"TrainerController._reset_env": {
"total": 8.093730084023264,
"count": 44,
"self": 8.093730084023264
},
"TrainerController.advance": {
"total": 37486.349426527464,
"count": 591223,
"self": 6.801121357726515,
"children": {
"env_step": {
"total": 29141.95539214684,
"count": 591223,
"self": 28126.724425609842,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1010.812899589393,
"count": 591223,
"self": 31.608927101558947,
"children": {
"TorchPolicy.evaluate": {
"total": 979.2039724878341,
"count": 1116124,
"self": 979.2039724878341
}
}
},
"workers": {
"total": 4.41806694760453,
"count": 591222,
"self": 0.0,
"children": {
"worker_root": {
"total": 37486.50146003467,
"count": 591222,
"is_parallel": true,
"self": 10235.900265968987,
"children": {
"steps_from_proto": {
"total": 0.09003241801110562,
"count": 88,
"is_parallel": true,
"self": 0.010110262002854142,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.07992215600825148,
"count": 352,
"is_parallel": true,
"self": 0.07992215600825148
}
}
},
"UnityEnvironment.step": {
"total": 27250.51116164767,
"count": 591222,
"is_parallel": true,
"self": 77.03721077784576,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 482.1863650019877,
"count": 591222,
"is_parallel": true,
"self": 482.1863650019877
},
"communicator.exchange": {
"total": 25671.4253012892,
"count": 591222,
"is_parallel": true,
"self": 25671.4253012892
},
"steps_from_proto": {
"total": 1019.8622845786376,
"count": 1182444,
"is_parallel": true,
"self": 110.07768299250893,
"children": {
"_process_rank_one_or_two_observation": {
"total": 909.7846015861287,
"count": 4729776,
"is_parallel": true,
"self": 909.7846015861287
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8337.592913022898,
"count": 591222,
"self": 58.09901485214505,
"children": {
"process_trajectory": {
"total": 1213.6244861298765,
"count": 591222,
"self": 1210.9133504228666,
"children": {
"RLTrainer._checkpoint": {
"total": 2.7111357070098165,
"count": 17,
"self": 2.7111357070098165
}
}
},
"_update_policy": {
"total": 7065.869412040876,
"count": 420,
"self": 719.8241967614667,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6346.04521527941,
"count": 12612,
"self": 6346.04521527941
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.45800004247576e-06,
"count": 1,
"self": 1.45800004247576e-06
},
"TrainerController._save_models": {
"total": 0.11534745800599921,
"count": 1,
"self": 0.0017820830107666552,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11356537499523256,
"count": 1,
"self": 0.11356537499523256
}
}
}
}
}
}
}
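
For reference, a minimal sketch of how the "gauges" block above can be loaded and summarized with the Python standard library. The local filename "timers.json" is an assumption for illustration; ML-Agents writes this file under results/<run-id>/run_logs/ by default.

import json

# Load the run log shown above; the local path is an assumption.
with open("timers.json") as f:
    log = json.load(f)

# Each gauge records the most recent value, the min, the max, and the
# number of samples for one statistic (e.g. SoccerTwos.Self-play.ELO.mean).
for name, gauge in log["gauges"].items():
    print(
        f"{name}: value={gauge['value']:.4g} "
        f"min={gauge['min']:.4g} max={gauge['max']:.4g} "
        f"count={gauge['count']}"
    )

The nested "children" entries that follow "metadata" form the profiler tree: each node's "total" is wall-clock seconds including its children, while "self" excludes them, so the same traversal pattern (recursing into node["children"]) can be used to find where training time was spent, e.g. communicator.exchange dominating env_step here.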