{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.679749846458435,
"min": 1.5837690830230713,
"max": 1.6931036710739136,
"count": 18
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 33379.98828125,
"min": 1165.654052734375,
"max": 35658.38671875,
"count": 18
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 62.64102564102564,
"min": 18.5,
"max": 87.15789473684211,
"count": 18
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19544.0,
"min": 148.0,
"max": 20304.0,
"count": 18
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1222.809791901454,
"min": 1193.0768502042945,
"max": 1226.4892024962849,
"count": 18
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 190758.32753662684,
"min": 4800.0,
"max": 201144.22920939073,
"count": 18
},
"SoccerTwos.Step.mean": {
"value": 10169935.0,
"min": 9999982.0,
"max": 10169935.0,
"count": 18
},
"SoccerTwos.Step.sum": {
"value": 10169935.0,
"min": 9999982.0,
"max": 10169935.0,
"count": 18
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.006176153663545847,
"min": -0.024318289011716843,
"max": 0.19033126533031464,
"count": 18
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.9634799957275391,
"min": -3.6477432250976562,
"max": 15.603649139404297,
"count": 18
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0054854401387274265,
"min": -0.023617848753929138,
"max": 0.19180536270141602,
"count": 18
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.8557286262512207,
"min": -3.542677402496338,
"max": 15.997982025146484,
"count": 18
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 18
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 18
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.1732794864055438,
"min": -0.1732794864055438,
"max": 0.3216000000635783,
"count": 18
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -27.03159987926483,
"min": -27.03159987926483,
"max": 46.63159966468811,
"count": 18
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.1732794864055438,
"min": -0.1732794864055438,
"max": 0.3216000000635783,
"count": 18
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -27.03159987926483,
"min": -27.03159987926483,
"max": 46.63159966468811,
"count": 18
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 18
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 18
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015739139712726075,
"min": 0.014609941675735172,
"max": 0.01964028791990131,
"count": 8
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015739139712726075,
"min": 0.014609941675735172,
"max": 0.01964028791990131,
"count": 8
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0893124724427859,
"min": 0.08485158532857895,
"max": 0.09563147947192192,
"count": 8
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0893124724427859,
"min": 0.08485158532857895,
"max": 0.09563147947192192,
"count": 8
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09080011248588563,
"min": 0.08616696794827779,
"max": 0.09690051277478536,
"count": 8
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09080011248588563,
"min": 0.08616696794827779,
"max": 0.09690051277478536,
"count": 8
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 8
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 8
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 8
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 8
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 8
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 8
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1714188442",
"python_version": "3.10.12 (main, Jul 5 2023, 15:02:25) [Clang 14.0.6 ]",
"command_line_arguments": "/Users/parth/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0",
"numpy_version": "1.23.5",
"end_time_seconds": "1714189152"
},
"total": 710.7810192499892,
"count": 1,
"self": 0.23493162498925813,
"children": {
"run_training.setup": {
"total": 0.028353750007227063,
"count": 1,
"self": 0.028353750007227063
},
"TrainerController.start_learning": {
"total": 710.5177338749927,
"count": 1,
"self": 0.1342643857933581,
"children": {
"TrainerController._reset_env": {
"total": 2.631008541007759,
"count": 2,
"self": 2.631008541007759
},
"TrainerController.advance": {
"total": 707.6277636972081,
"count": 11636,
"self": 0.12268581980606541,
"children": {
"env_step": {
"total": 568.1032658139302,
"count": 11636,
"self": 548.0383035824925,
"children": {
"SubprocessEnvManager._take_step": {
"total": 19.98557951292605,
"count": 11636,
"self": 0.7190609581302851,
"children": {
"TorchPolicy.evaluate": {
"total": 19.266518554795766,
"count": 21346,
"self": 19.266518554795766
}
}
},
"workers": {
"total": 0.07938271851162426,
"count": 11636,
"self": 0.0,
"children": {
"worker_root": {
"total": 708.1492451805389,
"count": 11636,
"is_parallel": true,
"self": 177.74469017470255,
"children": {
"steps_from_proto": {
"total": 0.0037761230196338147,
"count": 4,
"is_parallel": true,
"self": 0.0005192049720790237,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003256918047554791,
"count": 16,
"is_parallel": true,
"self": 0.003256918047554791
}
}
},
"UnityEnvironment.step": {
"total": 530.4007788828167,
"count": 11636,
"is_parallel": true,
"self": 1.4402798907249235,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.246110053150915,
"count": 11636,
"is_parallel": true,
"self": 9.246110053150915
},
"communicator.exchange": {
"total": 500.89330227387836,
"count": 11636,
"is_parallel": true,
"self": 500.89330227387836
},
"steps_from_proto": {
"total": 18.821086665062467,
"count": 23272,
"is_parallel": true,
"self": 2.077526547946036,
"children": {
"_process_rank_one_or_two_observation": {
"total": 16.74356011711643,
"count": 93088,
"is_parallel": true,
"self": 16.74356011711643
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 139.40181206347188,
"count": 11636,
"self": 1.0675276794936508,
"children": {
"process_trajectory": {
"total": 29.373940135963494,
"count": 11636,
"self": 29.23191492795013,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14202520801336505,
"count": 1,
"self": 0.14202520801336505
}
}
},
"_update_policy": {
"total": 108.96034424801474,
"count": 8,
"self": 12.666946159064537,
"children": {
"TorchPOCAOptimizer.update": {
"total": 96.2933980889502,
"count": 240,
"self": 96.2933980889502
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.5899651013314724e-07,
"count": 1,
"self": 4.5899651013314724e-07
},
"TrainerController._save_models": {
"total": 0.12469679198693484,
"count": 1,
"self": 0.0009215000027325004,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12377529198420234,
"count": 1,
"self": 0.12377529198420234
}
}
}
}
}
}
}