{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8448234796524048,
"min": 1.7827210426330566,
"max": 3.2957472801208496,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 38667.5,
"min": 21874.908203125,
"max": 118266.96875,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 55.18681318681319,
"min": 40.608333333333334,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20088.0,
"min": 16308.0,
"max": 24156.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1638.9551668867218,
"min": 1196.3703374214208,
"max": 1643.7267323121498,
"count": 498
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 298289.84037338337,
"min": 2394.524918640403,
"max": 376242.54906729347,
"count": 498
},
"SoccerTwos.Step.mean": {
"value": 4999860.0,
"min": 9096.0,
"max": 4999860.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999860.0,
"min": 9096.0,
"max": 4999860.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.04752557352185249,
"min": -0.10519690066576004,
"max": 0.17328228056430817,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 8.649654388427734,
"min": -18.59147071838379,
"max": 35.273250579833984,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0490778312087059,
"min": -0.10707984119653702,
"max": 0.17463314533233643,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 8.932165145874023,
"min": -19.95433807373047,
"max": 35.5179443359375,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.015215383781181587,
"min": -0.7706000000238419,
"max": 0.4468805193901062,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.769199848175049,
"min": -61.963199973106384,
"max": 64.50340002775192,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.015215383781181587,
"min": -0.7706000000238419,
"max": 0.4468805193901062,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.769199848175049,
"min": -61.963199973106384,
"max": 64.50340002775192,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018510479600323985,
"min": 0.00921370624046176,
"max": 0.02436254124622792,
"count": 241
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018510479600323985,
"min": 0.00921370624046176,
"max": 0.02436254124622792,
"count": 241
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10518327032526334,
"min": 0.0008564350117618839,
"max": 0.1266746498644352,
"count": 241
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10518327032526334,
"min": 0.0008564350117618839,
"max": 0.1266746498644352,
"count": 241
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10649096717437108,
"min": 0.0008512460762479652,
"max": 0.12907827720046045,
"count": 241
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10649096717437108,
"min": 0.0008512460762479652,
"max": 0.12907827720046045,
"count": 241
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 241
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 241
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 241
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 241
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 241
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 241
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716801003",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/media/fast/code/learning/venv_learning/bin/mlagents-learn ./ml-agents/config/poca/SoccerTwos.yaml --env=./ml-agents/training-envs-executables/linux/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1716806378"
},
"total": 5374.680278655025,
"count": 1,
"self": 0.3679984110640362,
"children": {
"run_training.setup": {
"total": 0.14300561393611133,
"count": 1,
"self": 0.14300561393611133
},
"TrainerController.start_learning": {
"total": 5374.169274630025,
"count": 1,
"self": 4.976561686838977,
"children": {
"TrainerController._reset_env": {
"total": 5.110102920443751,
"count": 25,
"self": 5.110102920443751
},
"TrainerController.advance": {
"total": 5363.987556022825,
"count": 344357,
"self": 4.974061955930665,
"children": {
"env_step": {
"total": 4113.432212984189,
"count": 344357,
"self": 2906.255522805848,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1203.8607318192953,
"count": 344357,
"self": 26.547632063971832,
"children": {
"TorchPolicy.evaluate": {
"total": 1177.3130997553235,
"count": 629108,
"self": 1177.3130997553235
}
}
},
"workers": {
"total": 3.315958359045908,
"count": 344357,
"self": 0.0,
"children": {
"worker_root": {
"total": 5367.593900271109,
"count": 344357,
"is_parallel": true,
"self": 2978.264871275518,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0013673820067197084,
"count": 2,
"is_parallel": true,
"self": 0.0003451470984145999,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010222349083051085,
"count": 8,
"is_parallel": true,
"self": 0.0010222349083051085
}
}
},
"UnityEnvironment.step": {
"total": 0.014498173026368022,
"count": 1,
"is_parallel": true,
"self": 0.0003251349553465843,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002796109765768051,
"count": 1,
"is_parallel": true,
"self": 0.0002796109765768051
},
"communicator.exchange": {
"total": 0.012896180036477745,
"count": 1,
"is_parallel": true,
"self": 0.012896180036477745
},
"steps_from_proto": {
"total": 0.000997247057966888,
"count": 2,
"is_parallel": true,
"self": 0.0002167389029636979,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00078050815500319,
"count": 8,
"is_parallel": true,
"self": 0.00078050815500319
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2389.30539528816,
"count": 344356,
"is_parallel": true,
"self": 105.24268618470524,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 73.29048731795046,
"count": 344356,
"is_parallel": true,
"self": 73.29048731795046
},
"communicator.exchange": {
"total": 1896.7310156814056,
"count": 344356,
"is_parallel": true,
"self": 1896.7310156814056
},
"steps_from_proto": {
"total": 314.0412061040988,
"count": 688712,
"is_parallel": true,
"self": 64.98019582079723,
"children": {
"_process_rank_one_or_two_observation": {
"total": 249.06101028330158,
"count": 2754848,
"is_parallel": true,
"self": 249.06101028330158
}
}
}
}
},
"steps_from_proto": {
"total": 0.023633707431145012,
"count": 48,
"is_parallel": true,
"self": 0.0049891985254362226,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.01864450890570879,
"count": 192,
"is_parallel": true,
"self": 0.01864450890570879
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1245.5812810827047,
"count": 344357,
"self": 36.74030586180743,
"children": {
"process_trajectory": {
"total": 552.077100536786,
"count": 344357,
"self": 551.1071592046646,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9699413321213797,
"count": 10,
"self": 0.9699413321213797
}
}
},
"_update_policy": {
"total": 656.7638746841112,
"count": 241,
"self": 360.24006106797606,
"children": {
"TorchPOCAOptimizer.update": {
"total": 296.52381361613516,
"count": 7230,
"self": 296.52381361613516
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.100155249238014e-07,
"count": 1,
"self": 5.100155249238014e-07
},
"TrainerController._save_models": {
"total": 0.09505348990205675,
"count": 1,
"self": 0.000944610801525414,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09410887910053134,
"count": 1,
"self": 0.09410887910053134
}
}
}
}
}
}
}