{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1323869228363037,
"min": 3.0606625080108643,
"max": 3.2958083152770996,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 40996.6796875,
"min": 17678.60546875,
"max": 170737.53125,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 371.9230769230769,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 10600.0,
"max": 25616.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1181.7160720870315,
"min": 1177.4109326952434,
"max": 1208.5830662436736,
"count": 422
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2363.432144174063,
"min": 2357.199266755703,
"max": 19141.64147920675,
"count": 422
},
"SoccerTwos.Step.mean": {
"value": 4999854.0,
"min": 9658.0,
"max": 4999854.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999854.0,
"min": 9658.0,
"max": 4999854.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.003024178557097912,
"min": -0.031495776027441025,
"max": 0.06315721571445465,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.03024178557097912,
"min": -0.5153208374977112,
"max": 0.862785816192627,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.002978297881782055,
"min": -0.033182282000780106,
"max": 0.07270117849111557,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.02978297881782055,
"min": -0.5303707718849182,
"max": 0.945115327835083,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.6835333324140973,
"max": 0.4140235273277058,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -12.729200005531311,
"max": 7.038399964570999,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.6835333324140973,
"max": 0.4140235273277058,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -12.729200005531311,
"max": 7.038399964570999,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01682371503363053,
"min": 0.011035426127515771,
"max": 0.024981034867232667,
"count": 232
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01682371503363053,
"min": 0.011035426127515771,
"max": 0.024981034867232667,
"count": 232
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.00032613252939578765,
"min": 1.1867353927404642e-06,
"max": 0.02258039607355992,
"count": 232
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.00032613252939578765,
"min": 1.1867353927404642e-06,
"max": 0.02258039607355992,
"count": 232
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.00032702218062089135,
"min": 9.515035074514344e-07,
"max": 0.016307429557976625,
"count": 232
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.00032702218062089135,
"min": 9.515035074514344e-07,
"max": 0.016307429557976625,
"count": 232
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0010000000000000002,
"min": 0.001,
"max": 0.0010000000000000002,
"count": 232
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0010000000000000002,
"min": 0.001,
"max": 0.0010000000000000002,
"count": 232
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 232
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 232
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 232
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 232
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1707916938",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/mnt/c/Users/anom/Desktop/rl_course/rl_env/bin/mlagents-learn config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1707944874"
},
"total": 27936.812676874,
"count": 1,
"self": 10.032790425000712,
"children": {
"run_training.setup": {
"total": 0.5509300390003773,
"count": 1,
"self": 0.5509300390003773
},
"TrainerController.start_learning": {
"total": 27926.22895641,
"count": 1,
"self": 8.590567313713109,
"children": {
"TrainerController._reset_env": {
"total": 12.140077391002706,
"count": 25,
"self": 12.140077391002706
},
"TrainerController.advance": {
"total": 27905.09097910728,
"count": 325478,
"self": 7.641760156941018,
"children": {
"env_step": {
"total": 9921.630150804955,
"count": 325478,
"self": 7656.037897949961,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2260.346262503399,
"count": 325478,
"self": 53.99732742945798,
"children": {
"TorchPolicy.evaluate": {
"total": 2206.348935073941,
"count": 646134,
"self": 2206.348935073941
}
}
},
"workers": {
"total": 5.245990351594628,
"count": 325478,
"self": 0.0,
"children": {
"worker_root": {
"total": 27907.781506887415,
"count": 325478,
"is_parallel": true,
"self": 21440.227096126288,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.013961601001028612,
"count": 2,
"is_parallel": true,
"self": 0.01151730100173154,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024442999992970726,
"count": 8,
"is_parallel": true,
"self": 0.0024442999992970726
}
}
},
"UnityEnvironment.step": {
"total": 0.035664302999975916,
"count": 1,
"is_parallel": true,
"self": 0.0006882000016048551,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008597999994890415,
"count": 1,
"is_parallel": true,
"self": 0.0008597999994890415
},
"communicator.exchange": {
"total": 0.03181540199966548,
"count": 1,
"is_parallel": true,
"self": 0.03181540199966548
},
"steps_from_proto": {
"total": 0.0023009009992165375,
"count": 2,
"is_parallel": true,
"self": 0.0005528999981834204,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001748001001033117,
"count": 8,
"is_parallel": true,
"self": 0.001748001001033117
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 6467.50261765512,
"count": 325477,
"is_parallel": true,
"self": 216.06494273081535,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 242.568812012727,
"count": 325477,
"is_parallel": true,
"self": 242.568812012727
},
"communicator.exchange": {
"total": 5300.24297349226,
"count": 325477,
"is_parallel": true,
"self": 5300.24297349226
},
"steps_from_proto": {
"total": 708.6258894193179,
"count": 650954,
"is_parallel": true,
"self": 169.0170130383558,
"children": {
"_process_rank_one_or_two_observation": {
"total": 539.6088763809621,
"count": 2603816,
"is_parallel": true,
"self": 539.6088763809621
}
}
}
}
},
"steps_from_proto": {
"total": 0.05179310600669851,
"count": 48,
"is_parallel": true,
"self": 0.01254410402088979,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03924900198580872,
"count": 192,
"is_parallel": true,
"self": 0.03924900198580872
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 17975.819068145385,
"count": 325478,
"self": 61.6821880695461,
"children": {
"process_trajectory": {
"total": 1634.5572613858694,
"count": 325478,
"self": 1631.2246501808704,
"children": {
"RLTrainer._checkpoint": {
"total": 3.3326112049990115,
"count": 10,
"self": 3.3326112049990115
}
}
},
"_update_policy": {
"total": 16279.57961868997,
"count": 232,
"self": 924.5769946849359,
"children": {
"TorchPOCAOptimizer.update": {
"total": 15355.002624005034,
"count": 6963,
"self": 15355.002624005034
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2000018614344299e-06,
"count": 1,
"self": 1.2000018614344299e-06
},
"TrainerController._save_models": {
"total": 0.40733139800067875,
"count": 1,
"self": 0.1052960000015446,
"children": {
"RLTrainer._checkpoint": {
"total": 0.30203539799913415,
"count": 1,
"self": 0.30203539799913415
}
}
}
}
}
}
}