{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.21075439453125,
"min": 3.204401731491089,
"max": 3.239344835281372,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 80859.640625,
"min": 30506.2265625,
"max": 103413.765625,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 522.7,
"max": 999.0,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 5836.0,
"max": 31496.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1204.4078887185597,
"min": 1203.3760978452917,
"max": 1211.012974755959,
"count": 38
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2408.8157774371193,
"min": 2406.7521956905834,
"max": 9676.538114551746,
"count": 38
},
"SoccerTwos.Step.mean": {
"value": 999580.0,
"min": 509863.0,
"max": 999580.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 999580.0,
"min": 509863.0,
"max": 999580.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.000138396630063653,
"min": -0.012456486001610756,
"max": 0.010768167674541473,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.00138396630063653,
"min": -0.12456485629081726,
"max": 0.10768167674541473,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.000429503241321072,
"min": -0.002827476244419813,
"max": 0.00833826418966055,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.004295032471418381,
"min": -0.034029409289360046,
"max": 0.08765929937362671,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.46153846153846156,
"max": 0.2863714354378836,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -6.0,
"max": 4.009200096130371,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.46153846153846156,
"max": 0.2863714354378836,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -6.0,
"max": 4.009200096130371,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016490051922543596,
"min": 0.013185302205965855,
"max": 0.024510134050312143,
"count": 22
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016490051922543596,
"min": 0.013185302205965855,
"max": 0.024510134050312143,
"count": 22
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0006891566416015848,
"min": 9.487354261636938e-05,
"max": 0.004903722905308112,
"count": 22
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0006891566416015848,
"min": 9.487354261636938e-05,
"max": 0.004903722905308112,
"count": 22
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0006855302412683765,
"min": 9.53001888850243e-05,
"max": 0.002271842449570411,
"count": 22
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0006855302412683765,
"min": 9.53001888850243e-05,
"max": 0.002271842449570411,
"count": 22
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0007500000000000002,
"min": 0.00075,
"max": 0.0007500000000000002,
"count": 22
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0007500000000000002,
"min": 0.00075,
"max": 0.0007500000000000002,
"count": 22
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 22
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 22
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 22
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 22
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690834013",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume --torch-device=cpu",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690835903"
},
"total": 1890.6797339620002,
"count": 1,
"self": 0.4866258970000672,
"children": {
"run_training.setup": {
"total": 0.06813264099992011,
"count": 1,
"self": 0.06813264099992011
},
"TrainerController.start_learning": {
"total": 1890.1249754240002,
"count": 1,
"self": 1.257913461006865,
"children": {
"TrainerController._reset_env": {
"total": 2.3086763210003483,
"count": 4,
"self": 2.3086763210003483
},
"TrainerController.advance": {
"total": 1886.3343982839929,
"count": 32791,
"self": 1.2389807089502938,
"children": {
"env_step": {
"total": 1087.0959611040507,
"count": 32791,
"self": 912.4459261100778,
"children": {
"SubprocessEnvManager._take_step": {
"total": 173.86589059898233,
"count": 32791,
"self": 7.4967251109178505,
"children": {
"TorchPolicy.evaluate": {
"total": 166.36916548806448,
"count": 65090,
"self": 166.36916548806448
}
}
},
"workers": {
"total": 0.7841443949905624,
"count": 32791,
"self": 0.0,
"children": {
"worker_root": {
"total": 1881.5510355180058,
"count": 32791,
"is_parallel": true,
"self": 1121.1697415899635,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006408747000023141,
"count": 2,
"is_parallel": true,
"self": 0.001576562000082049,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0048321849999410915,
"count": 8,
"is_parallel": true,
"self": 0.0048321849999410915
}
}
},
"UnityEnvironment.step": {
"total": 0.077040795999892,
"count": 1,
"is_parallel": true,
"self": 0.001313672000378574,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0009023519996844698,
"count": 1,
"is_parallel": true,
"self": 0.0009023519996844698
},
"communicator.exchange": {
"total": 0.06991308099986782,
"count": 1,
"is_parallel": true,
"self": 0.06991308099986782
},
"steps_from_proto": {
"total": 0.004911690999961138,
"count": 2,
"is_parallel": true,
"self": 0.0007516129990108311,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004160078000950307,
"count": 8,
"is_parallel": true,
"self": 0.004160078000950307
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.009500134000063554,
"count": 6,
"is_parallel": true,
"self": 0.0016614720016150386,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.007838661998448515,
"count": 24,
"is_parallel": true,
"self": 0.007838661998448515
}
}
},
"UnityEnvironment.step": {
"total": 760.3717937940423,
"count": 32790,
"is_parallel": true,
"self": 45.25467255386002,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.82686035908,
"count": 32790,
"is_parallel": true,
"self": 22.82686035908
},
"communicator.exchange": {
"total": 548.1324448990013,
"count": 32790,
"is_parallel": true,
"self": 548.1324448990013
},
"steps_from_proto": {
"total": 144.15781598210106,
"count": 65580,
"is_parallel": true,
"self": 24.902229387292664,
"children": {
"_process_rank_one_or_two_observation": {
"total": 119.25558659480839,
"count": 262320,
"is_parallel": true,
"self": 119.25558659480839
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 797.9994564709918,
"count": 32791,
"self": 9.946200549958121,
"children": {
"process_trajectory": {
"total": 117.76115255903596,
"count": 32791,
"self": 117.525524951036,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23562760799995885,
"count": 1,
"self": 0.23562760799995885
}
}
},
"_update_policy": {
"total": 670.2921033619978,
"count": 22,
"self": 125.4074932389849,
"children": {
"TorchPOCAOptimizer.update": {
"total": 544.8846101230129,
"count": 678,
"self": 544.8846101230129
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.044999746431131e-06,
"count": 1,
"self": 2.044999746431131e-06
},
"TrainerController._save_models": {
"total": 0.22398531300041213,
"count": 1,
"self": 0.002115945000696229,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2218693679997159,
"count": 1,
"self": 0.2218693679997159
}
}
}
}
}
}
}