{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.9442510604858398,
"min": 1.9251229763031006,
"max": 3.2957751750946045,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 38013.99609375,
"min": 28760.005859375,
"max": 117699.8515625,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 49.83,
"min": 40.16528925619835,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19932.0,
"min": 11668.0,
"max": 30992.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1503.2260117014357,
"min": 1198.7234718671002,
"max": 1519.056163524618,
"count": 470
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 300645.20234028716,
"min": 2397.4469437342004,
"max": 358067.5392072537,
"count": 470
},
"SoccerTwos.Step.mean": {
"value": 4999990.0,
"min": 9514.0,
"max": 4999990.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999990.0,
"min": 9514.0,
"max": 4999990.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.015706663951277733,
"min": -0.0942673608660698,
"max": 0.14426925778388977,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 3.1256260871887207,
"min": -16.119718551635742,
"max": 24.510446548461914,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.013603504747152328,
"min": -0.0953456461429596,
"max": 0.1495077759027481,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 2.7070975303649902,
"min": -16.304105758666992,
"max": 24.941312789916992,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.011024119865954222,
"min": -0.6497999990687651,
"max": 0.37245106443445736,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.19379985332489,
"min": -54.325600028038025,
"max": 65.07360023260117,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.011024119865954222,
"min": -0.6497999990687651,
"max": 0.37245106443445736,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.19379985332489,
"min": -54.325600028038025,
"max": 65.07360023260117,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018317764533761268,
"min": 0.010698239152164508,
"max": 0.0232989558018744,
"count": 239
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018317764533761268,
"min": 0.010698239152164508,
"max": 0.0232989558018744,
"count": 239
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11499877870082856,
"min": 2.458588736165742e-05,
"max": 0.1263156647483508,
"count": 239
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11499877870082856,
"min": 2.458588736165742e-05,
"max": 0.1263156647483508,
"count": 239
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11676812221606572,
"min": 2.4948281437294404e-05,
"max": 0.12844556644558908,
"count": 239
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11676812221606572,
"min": 2.4948281437294404e-05,
"max": 0.12844556644558908,
"count": 239
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 239
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 239
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 239
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 239
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 239
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 239
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1707614940",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/victor/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1707627707"
},
"total": 12766.547348516993,
"count": 1,
"self": 0.32042913197074085,
"children": {
"run_training.setup": {
"total": 0.01880163702298887,
"count": 1,
"self": 0.01880163702298887
},
"TrainerController.start_learning": {
"total": 12766.208117748,
"count": 1,
"self": 5.962926205189433,
"children": {
"TrainerController._reset_env": {
"total": 2.66041523302556,
"count": 25,
"self": 2.66041523302556
},
"TrainerController.advance": {
"total": 12757.38144438478,
"count": 338538,
"self": 6.1658543918747455,
"children": {
"env_step": {
"total": 4587.873034044984,
"count": 338538,
"self": 3592.0257112507825,
"children": {
"SubprocessEnvManager._take_step": {
"total": 991.9554720758752,
"count": 338538,
"self": 36.427117089449894,
"children": {
"TorchPolicy.evaluate": {
"total": 955.5283549864253,
"count": 633286,
"self": 955.5283549864253
}
}
},
"workers": {
"total": 3.891850718326168,
"count": 338538,
"self": 0.0,
"children": {
"worker_root": {
"total": 12756.9396989425,
"count": 338538,
"is_parallel": true,
"self": 9884.010677731188,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003197050013113767,
"count": 2,
"is_parallel": true,
"self": 0.0015253629826474935,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016716870304662734,
"count": 8,
"is_parallel": true,
"self": 0.0016716870304662734
}
}
},
"UnityEnvironment.step": {
"total": 0.027346729999408126,
"count": 1,
"is_parallel": true,
"self": 0.0005745459930039942,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003659430076368153,
"count": 1,
"is_parallel": true,
"self": 0.0003659430076368153
},
"communicator.exchange": {
"total": 0.024912182008847594,
"count": 1,
"is_parallel": true,
"self": 0.024912182008847594
},
"steps_from_proto": {
"total": 0.001494058989919722,
"count": 2,
"is_parallel": true,
"self": 0.0003006889601238072,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011933700297959149,
"count": 8,
"is_parallel": true,
"self": 0.0011933700297959149
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2872.8941820973996,
"count": 338537,
"is_parallel": true,
"self": 171.00910928664962,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 121.2719798910548,
"count": 338537,
"is_parallel": true,
"self": 121.2719798910548
},
"communicator.exchange": {
"total": 2083.245992752578,
"count": 338537,
"is_parallel": true,
"self": 2083.245992752578
},
"steps_from_proto": {
"total": 497.3671001671173,
"count": 677074,
"is_parallel": true,
"self": 96.80499770634924,
"children": {
"_process_rank_one_or_two_observation": {
"total": 400.5621024607681,
"count": 2708296,
"is_parallel": true,
"self": 400.5621024607681
}
}
}
}
},
"steps_from_proto": {
"total": 0.034839113912312314,
"count": 48,
"is_parallel": true,
"self": 0.006737367890309542,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.02810174602200277,
"count": 192,
"is_parallel": true,
"self": 0.02810174602200277
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8163.342555947922,
"count": 338538,
"self": 46.3605592820968,
"children": {
"process_trajectory": {
"total": 1028.2696332178602,
"count": 338538,
"self": 1024.4807991608395,
"children": {
"RLTrainer._checkpoint": {
"total": 3.788834057020722,
"count": 10,
"self": 3.788834057020722
}
}
},
"_update_policy": {
"total": 7088.712363447965,
"count": 239,
"self": 521.196462993772,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6567.515900454193,
"count": 7173,
"self": 6567.515900454193
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.539921741932631e-07,
"count": 1,
"self": 6.539921741932631e-07
},
"TrainerController._save_models": {
"total": 0.203331271011848,
"count": 1,
"self": 0.001419581996742636,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20191168901510537,
"count": 1,
"self": 0.20191168901510537
}
}
}
}
}
}
}