{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5024334192276,
"min": 1.3184305429458618,
"max": 3.295718193054199,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 26731.294921875,
"min": 14956.23828125,
"max": 132043.515625,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 92.11320754716981,
"min": 37.651162790697676,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19528.0,
"min": 14552.0,
"max": 27504.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1541.1355558924997,
"min": 1197.0158617313662,
"max": 1688.5004096561183,
"count": 4910
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 163360.36892460496,
"min": 2395.238694786875,
"max": 421624.67825181223,
"count": 4910
},
"SoccerTwos.Step.mean": {
"value": 49999964.0,
"min": 9768.0,
"max": 49999964.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999964.0,
"min": 9768.0,
"max": 49999964.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.09638256579637527,
"min": -0.1396932154893875,
"max": 0.16838690638542175,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -10.216551780700684,
"min": -25.325477600097656,
"max": 22.267803192138672,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.09742562472820282,
"min": -0.14202383160591125,
"max": 0.16969288885593414,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -10.327116012573242,
"min": -25.125972747802734,
"max": 22.327320098876953,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.2516566042630178,
"min": -0.6363571413925716,
"max": 0.5916088885731168,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -26.675600051879883,
"min": -64.31119990348816,
"max": 59.81119990348816,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.2516566042630178,
"min": -0.6363571413925716,
"max": 0.5916088885731168,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -26.675600051879883,
"min": -64.31119990348816,
"max": 59.81119990348816,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018697804305702448,
"min": 0.009104452545579988,
"max": 0.026643019893284266,
"count": 2423
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018697804305702448,
"min": 0.009104452545579988,
"max": 0.026643019893284266,
"count": 2423
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.07356646607319514,
"min": 2.9910259134643033e-07,
"max": 0.12964732473095258,
"count": 2423
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.07356646607319514,
"min": 2.9910259134643033e-07,
"max": 0.12964732473095258,
"count": 2423
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.07455665295322736,
"min": 3.0016980142969864e-07,
"max": 0.13194423938790958,
"count": 2423
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.07455665295322736,
"min": 3.0016980142969864e-07,
"max": 0.13194423938790958,
"count": 2423
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2423
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2423
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2423
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2423
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2423
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2423
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694809363",
"python_version": "3.9.17 (main, Jul 5 2023, 20:41:20) \n[GCC 11.2.0]",
"command_line_arguments": "/home/gaionl/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos1 --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695040180"
},
"total": 230817.004368874,
"count": 1,
"self": 0.44111788604641333,
"children": {
"run_training.setup": {
"total": 0.011371827000402845,
"count": 1,
"self": 0.011371827000402845
},
"TrainerController.start_learning": {
"total": 230816.55187916098,
"count": 1,
"self": 85.74591592012439,
"children": {
"TrainerController._reset_env": {
"total": 6.6357272081731935,
"count": 250,
"self": 6.6357272081731935
},
"TrainerController.advance": {
"total": 230723.90142067865,
"count": 3448206,
"self": 91.43295751590631,
"children": {
"env_step": {
"total": 72865.68168361404,
"count": 3448206,
"self": 61289.247190914786,
"children": {
"SubprocessEnvManager._take_step": {
"total": 11522.558785983892,
"count": 3448206,
"self": 537.6852973723653,
"children": {
"TorchPolicy.evaluate": {
"total": 10984.873488611527,
"count": 6283316,
"self": 10984.873488611527
}
}
},
"workers": {
"total": 53.8757067153565,
"count": 3448206,
"self": 0.0,
"children": {
"worker_root": {
"total": 230677.16631610927,
"count": 3448206,
"is_parallel": true,
"self": 179001.27623207617,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0035869470084435306,
"count": 2,
"is_parallel": true,
"self": 0.0016239180040429346,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001963029004400596,
"count": 8,
"is_parallel": true,
"self": 0.001963029004400596
}
}
},
"UnityEnvironment.step": {
"total": 0.03157267300412059,
"count": 1,
"is_parallel": true,
"self": 0.0010514200112083927,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008765649981796741,
"count": 1,
"is_parallel": true,
"self": 0.0008765649981796741
},
"communicator.exchange": {
"total": 0.026194890997430775,
"count": 1,
"is_parallel": true,
"self": 0.026194890997430775
},
"steps_from_proto": {
"total": 0.003449796997301746,
"count": 2,
"is_parallel": true,
"self": 0.0006314890051726252,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002818307992129121,
"count": 8,
"is_parallel": true,
"self": 0.002818307992129121
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 51675.3453890593,
"count": 3448205,
"is_parallel": true,
"self": 3156.6989525170357,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2364.1434886952848,
"count": 3448205,
"is_parallel": true,
"self": 2364.1434886952848
},
"communicator.exchange": {
"total": 36481.022178212566,
"count": 3448205,
"is_parallel": true,
"self": 36481.022178212566
},
"steps_from_proto": {
"total": 9673.480769634414,
"count": 6896410,
"is_parallel": true,
"self": 1761.7438183802733,
"children": {
"_process_rank_one_or_two_observation": {
"total": 7911.736951254141,
"count": 27585640,
"is_parallel": true,
"self": 7911.736951254141
}
}
}
}
},
"steps_from_proto": {
"total": 0.5446949737961404,
"count": 498,
"is_parallel": true,
"self": 0.10069432427553693,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.4440006495206035,
"count": 1992,
"is_parallel": true,
"self": 0.4440006495206035
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 157766.7867795487,
"count": 3448206,
"self": 553.2083718009526,
"children": {
"process_trajectory": {
"total": 14261.549504052033,
"count": 3448206,
"self": 14237.875778907168,
"children": {
"RLTrainer._checkpoint": {
"total": 23.673725144864875,
"count": 100,
"self": 23.673725144864875
}
}
},
"_update_policy": {
"total": 142952.02890369573,
"count": 2423,
"self": 8946.41707963511,
"children": {
"TorchPOCAOptimizer.update": {
"total": 134005.61182406062,
"count": 72690,
"self": 134005.61182406062
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3430253602564335e-06,
"count": 1,
"self": 1.3430253602564335e-06
},
"TrainerController._save_models": {
"total": 0.26881401101127267,
"count": 1,
"self": 0.00218809099169448,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2666259200195782,
"count": 1,
"self": 0.2666259200195782
}
}
}
}
}
}
}