{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5426337718963623,
"min": 1.531245470046997,
"max": 3.2957189083099365,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 33320.890625,
"min": 7970.23046875,
"max": 128600.515625,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 55.26966292134831,
"min": 39.30952380952381,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19676.0,
"min": 3996.0,
"max": 31656.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1607.90912761641,
"min": 1194.6670111711967,
"max": 1637.0180950971917,
"count": 972
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 286207.82471572096,
"min": 2395.925075993749,
"max": 404377.00081549096,
"count": 972
},
"SoccerTwos.Step.mean": {
"value": 9999966.0,
"min": 9846.0,
"max": 9999966.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999966.0,
"min": 9846.0,
"max": 9999966.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.034293223172426224,
"min": -0.12104719877243042,
"max": 0.1903175264596939,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -6.138486862182617,
"min": -23.874568939208984,
"max": 24.74559211730957,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.03585423529148102,
"min": -0.12008939683437347,
"max": 0.18938793241977692,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -6.417908191680908,
"min": -23.972457885742188,
"max": 24.840940475463867,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.022670391551609145,
"min": -0.6,
"max": 0.5419545437801968,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -4.058000087738037,
"min": -67.45359998941422,
"max": 61.70399987697601,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.022670391551609145,
"min": -0.6,
"max": 0.5419545437801968,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -4.058000087738037,
"min": -67.45359998941422,
"max": 61.70399987697601,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.0185897947793516,
"min": 0.009789575341468055,
"max": 0.02578884911102553,
"count": 481
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.0185897947793516,
"min": 0.009789575341468055,
"max": 0.02578884911102553,
"count": 481
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.1093875877559185,
"min": 7.906696979868382e-06,
"max": 0.13266134212414424,
"count": 481
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.1093875877559185,
"min": 7.906696979868382e-06,
"max": 0.13266134212414424,
"count": 481
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11060970971981685,
"min": 7.798433792534827e-06,
"max": 0.1361390066643556,
"count": 481
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11060970971981685,
"min": 7.798433792534827e-06,
"max": 0.1361390066643556,
"count": 481
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 481
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 481
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 481
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 481
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 481
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 481
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676842658",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:42:20) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/menoua/opt/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1676876978"
},
"total": 34320.636995194,
"count": 1,
"self": 0.4771618579979986,
"children": {
"run_training.setup": {
"total": 0.03024002799999992,
"count": 1,
"self": 0.03024002799999992
},
"TrainerController.start_learning": {
"total": 34320.129593308004,
"count": 1,
"self": 14.530758860259084,
"children": {
"TrainerController._reset_env": {
"total": 4.604821874000669,
"count": 50,
"self": 4.604821874000669
},
"TrainerController.advance": {
"total": 34300.73496561474,
"count": 683570,
"self": 13.87728839054762,
"children": {
"env_step": {
"total": 10032.80140380752,
"count": 683570,
"self": 8270.437985418914,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1753.8916824040134,
"count": 683570,
"self": 73.03562723059713,
"children": {
"TorchPolicy.evaluate": {
"total": 1680.8560551734163,
"count": 1263360,
"self": 1680.8560551734163
}
}
},
"workers": {
"total": 8.471735984593252,
"count": 683570,
"self": 0.0,
"children": {
"worker_root": {
"total": 34292.74253611004,
"count": 683570,
"is_parallel": true,
"self": 27553.653667206636,
"children": {
"steps_from_proto": {
"total": 0.09807239999482897,
"count": 100,
"is_parallel": true,
"self": 0.021308775045160377,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0767636249496686,
"count": 400,
"is_parallel": true,
"self": 0.0767636249496686
}
}
},
"UnityEnvironment.step": {
"total": 6738.99079650341,
"count": 683570,
"is_parallel": true,
"self": 365.7697535714806,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 219.35635614431862,
"count": 683570,
"is_parallel": true,
"self": 219.35635614431862
},
"communicator.exchange": {
"total": 5042.1411571002,
"count": 683570,
"is_parallel": true,
"self": 5042.1411571002
},
"steps_from_proto": {
"total": 1111.7235296874107,
"count": 1367140,
"is_parallel": true,
"self": 231.5280831352045,
"children": {
"_process_rank_one_or_two_observation": {
"total": 880.1954465522062,
"count": 5468560,
"is_parallel": true,
"self": 880.1954465522062
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 24254.05627341667,
"count": 683570,
"self": 90.9083377452589,
"children": {
"process_trajectory": {
"total": 3033.4117440054147,
"count": 683570,
"self": 3027.4671627154057,
"children": {
"RLTrainer._checkpoint": {
"total": 5.944581290008955,
"count": 20,
"self": 5.944581290008955
}
}
},
"_update_policy": {
"total": 21129.736191665997,
"count": 481,
"self": 1550.1056513753756,
"children": {
"TorchPOCAOptimizer.update": {
"total": 19579.630540290622,
"count": 14442,
"self": 19579.630540290622
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0829971870407462e-06,
"count": 1,
"self": 1.0829971870407462e-06
},
"TrainerController._save_models": {
"total": 0.25904587600234663,
"count": 1,
"self": 0.008322439003677573,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25072343699866906,
"count": 1,
"self": 0.25072343699866906
}
}
}
}
}
}
}