poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4069772958755493,
"min": 1.3468372821807861,
"max": 3.2957348823547363,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 30075.546875,
"min": 22171.5078125,
"max": 105463.515625,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 61.62337662337662,
"min": 38.736,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 18980.0,
"min": 13080.0,
"max": 28216.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1710.0113274282098,
"min": 1196.073674877001,
"max": 1772.7610341236173,
"count": 4998
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 263341.7444239443,
"min": 2396.08127181869,
"max": 419982.6893369895,
"count": 4998
},
"SoccerTwos.Step.mean": {
"value": 49999696.0,
"min": 9128.0,
"max": 49999696.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999696.0,
"min": 9128.0,
"max": 49999696.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.027086248621344566,
"min": -0.13353946805000305,
"max": 0.20947594940662384,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -4.2525410652160645,
"min": -25.605226516723633,
"max": 25.177820205688477,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.02820216305553913,
"min": -0.13313187658786774,
"max": 0.20150870084762573,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.42773962020874,
"min": -25.302413940429688,
"max": 25.609683990478516,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.12298726352157106,
"min": -0.5,
"max": 0.46068000197410586,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 19.309000372886658,
"min": -64.20120006799698,
"max": 64.53359997272491,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.12298726352157106,
"min": -0.5,
"max": 0.46068000197410586,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 19.309000372886658,
"min": -64.20120006799698,
"max": 64.53359997272491,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017986779745357732,
"min": 0.010510212313238298,
"max": 0.02574000319194359,
"count": 2424
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017986779745357732,
"min": 0.010510212313238298,
"max": 0.02574000319194359,
"count": 2424
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.07823928445577621,
"min": 0.000528234158991836,
"max": 0.13084790507952374,
"count": 2424
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.07823928445577621,
"min": 0.000528234158991836,
"max": 0.13084790507952374,
"count": 2424
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.07952647060155868,
"min": 0.000523627806493702,
"max": 0.1331552078326543,
"count": 2424
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.07952647060155868,
"min": 0.000523627806493702,
"max": 0.1331552078326543,
"count": 2424
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2424
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2424
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2424
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2424
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2424
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2424
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696403757",
"python_version": "3.9.18 (main, Sep 11 2023, 13:41:44) \n[GCC 11.2.0]",
"command_line_arguments": "/opt/conda/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos4 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0.dev20231003",
"numpy_version": "1.26.0",
"end_time_seconds": "1696473272"
},
"total": 69514.93321041577,
"count": 1,
"self": 0.16795073170214891,
"children": {
"run_training.setup": {
"total": 0.009656739421188831,
"count": 1,
"self": 0.009656739421188831
},
"TrainerController.start_learning": {
"total": 69514.75560294464,
"count": 1,
"self": 37.14975555241108,
"children": {
"TrainerController._reset_env": {
"total": 4.256752256304026,
"count": 250,
"self": 4.256752256304026
},
"TrainerController.advance": {
"total": 69473.2781505268,
"count": 3437367,
"self": 32.72075719200075,
"children": {
"env_step": {
"total": 35875.554074604996,
"count": 3437367,
"self": 31633.094030559063,
"children": {
"SubprocessEnvManager._take_step": {
"total": 4219.770093231462,
"count": 3437367,
"self": 206.25313608534634,
"children": {
"TorchPolicy.evaluate": {
"total": 4013.5169571461156,
"count": 6279380,
"self": 4013.5169571461156
}
}
},
"workers": {
"total": 22.68995081447065,
"count": 3437367,
"self": 0.0,
"children": {
"worker_root": {
"total": 69447.12859765999,
"count": 3437367,
"is_parallel": true,
"self": 42521.725849598646,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002090802416205406,
"count": 2,
"is_parallel": true,
"self": 0.0004257494583725929,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016650529578328133,
"count": 8,
"is_parallel": true,
"self": 0.0016650529578328133
}
}
},
"UnityEnvironment.step": {
"total": 0.013848412781953812,
"count": 1,
"is_parallel": true,
"self": 0.0003952775150537491,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002933889627456665,
"count": 1,
"is_parallel": true,
"self": 0.0002933889627456665
},
"communicator.exchange": {
"total": 0.012025302276015282,
"count": 1,
"is_parallel": true,
"self": 0.012025302276015282
},
"steps_from_proto": {
"total": 0.0011344440281391144,
"count": 2,
"is_parallel": true,
"self": 0.00019432418048381805,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009401198476552963,
"count": 8,
"is_parallel": true,
"self": 0.0009401198476552963
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 26925.02196652163,
"count": 3437366,
"is_parallel": true,
"self": 1673.0620529102162,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 943.4379991926253,
"count": 3437366,
"is_parallel": true,
"self": 943.4379991926253
},
"communicator.exchange": {
"total": 19556.43012082018,
"count": 3437366,
"is_parallel": true,
"self": 19556.43012082018
},
"steps_from_proto": {
"total": 4752.091793598607,
"count": 6874732,
"is_parallel": true,
"self": 799.6544350106269,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3952.4373585879803,
"count": 27498928,
"is_parallel": true,
"self": 3952.4373585879803
}
}
}
}
},
"steps_from_proto": {
"total": 0.3807815397158265,
"count": 498,
"is_parallel": true,
"self": 0.06608195882290602,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.3146995808929205,
"count": 1992,
"is_parallel": true,
"self": 0.3146995808929205
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 33565.00331872981,
"count": 3437367,
"self": 352.38222152832896,
"children": {
"process_trajectory": {
"total": 5930.971878269687,
"count": 3437367,
"self": 5923.677744247019,
"children": {
"RLTrainer._checkpoint": {
"total": 7.294134022668004,
"count": 100,
"self": 7.294134022668004
}
}
},
"_update_policy": {
"total": 27281.649218931794,
"count": 2424,
"self": 4282.0166029874235,
"children": {
"TorchPOCAOptimizer.update": {
"total": 22999.63261594437,
"count": 72720,
"self": 22999.63261594437
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.910413503646851e-07,
"count": 1,
"self": 6.910413503646851e-07
},
"TrainerController._save_models": {
"total": 0.0709439180791378,
"count": 1,
"self": 0.0010833581909537315,
"children": {
"RLTrainer._checkpoint": {
"total": 0.06986055988818407,
"count": 1,
"self": 0.06986055988818407
}
}
}
}
}
}
}