{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.201995611190796,
"min": 2.173480272293091,
"max": 2.314014196395874,
"count": 100
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 44674.0859375,
"min": 37297.90234375,
"max": 52114.0,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 65.52054794520548,
"min": 54.666666666666664,
"max": 93.9423076923077,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19132.0,
"min": 18736.0,
"max": 20684.0,
"count": 100
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1501.2977191837756,
"min": 1448.428316190716,
"max": 1503.7782581484385,
"count": 100
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 219189.46700083124,
"min": 147811.67781677181,
"max": 279689.9748241801,
"count": 100
},
"SoccerTwos.Step.mean": {
"value": 2999970.0,
"min": 2009991.0,
"max": 2999970.0,
"count": 100
},
"SoccerTwos.Step.sum": {
"value": 2999970.0,
"min": 2009991.0,
"max": 2999970.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.07235688716173172,
"min": -0.1096392348408699,
"max": 0.09858974814414978,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -10.564105987548828,
"min": -15.104362487792969,
"max": 14.196924209594727,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07161447405815125,
"min": -0.10906262695789337,
"max": 0.10088992118835449,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -10.455713272094727,
"min": -14.786467552185059,
"max": 14.528148651123047,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.07710684897148445,
"min": -0.42398394241820286,
"max": 0.2856749995715088,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -11.257599949836731,
"min": -58.08580011129379,
"max": 41.13719993829727,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.07710684897148445,
"min": -0.42398394241820286,
"max": 0.2856749995715088,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -11.257599949836731,
"min": -58.08580011129379,
"max": 41.13719993829727,
"count": 100
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.020043744097711168,
"min": 0.017251228770085923,
"max": 0.03185359615987788,
"count": 96
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.020043744097711168,
"min": 0.017251228770085923,
"max": 0.03185359615987788,
"count": 96
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.1033657560745875,
"min": 0.0701694172496597,
"max": 0.1033657560745875,
"count": 96
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.1033657560745875,
"min": 0.0701694172496597,
"max": 0.1033657560745875,
"count": 96
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10660624156395594,
"min": 0.07164305417488019,
"max": 0.10660624156395594,
"count": 96
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10660624156395594,
"min": 0.07164305417488019,
"max": 0.10660624156395594,
"count": 96
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.00010000000000000003,
"min": 0.00010000000000000003,
"max": 0.00010000000000000003,
"count": 96
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.00010000000000000003,
"min": 0.00010000000000000003,
"max": 0.00010000000000000003,
"count": 96
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.30000000000000004,
"min": 0.30000000000000004,
"max": 0.30000000000000004,
"count": 96
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.30000000000000004,
"min": 0.30000000000000004,
"max": 0.30000000000000004,
"count": 96
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.010000000000000002,
"min": 0.010000000000000002,
"max": 0.010000000000000002,
"count": 96
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.010000000000000002,
"min": 0.010000000000000002,
"max": 0.010000000000000002,
"count": 96
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1718276835",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn config/poca/SoccerTwos.yaml --env=training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1718279378"
},
"total": 2543.1800257669997,
"count": 1,
"self": 0.4459229669996603,
"children": {
"run_training.setup": {
"total": 0.051663405999988754,
"count": 1,
"self": 0.051663405999988754
},
"TrainerController.start_learning": {
"total": 2542.682439394,
"count": 1,
"self": 1.7609745530094187,
"children": {
"TrainerController._reset_env": {
"total": 2.4089116700005206,
"count": 6,
"self": 2.4089116700005206
},
"TrainerController.advance": {
"total": 2538.2034783439904,
"count": 68323,
"self": 1.7323525409487956,
"children": {
"env_step": {
"total": 1754.4153196360703,
"count": 68323,
"self": 1334.566942097061,
"children": {
"SubprocessEnvManager._take_step": {
"total": 418.84384843703094,
"count": 68323,
"self": 12.063535462993457,
"children": {
"TorchPolicy.evaluate": {
"total": 406.7803129740375,
"count": 125570,
"self": 406.7803129740375
}
}
},
"workers": {
"total": 1.0045291019783917,
"count": 68323,
"self": 0.0,
"children": {
"worker_root": {
"total": 2537.1526747409644,
"count": 68323,
"is_parallel": true,
"self": 1416.4915292459632,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0033443359999978384,
"count": 2,
"is_parallel": true,
"self": 0.0008517240000287529,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024926119999690854,
"count": 8,
"is_parallel": true,
"self": 0.0024926119999690854
}
}
},
"UnityEnvironment.step": {
"total": 0.03887877500000059,
"count": 1,
"is_parallel": true,
"self": 0.0011581419999515674,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008241240000188554,
"count": 1,
"is_parallel": true,
"self": 0.0008241240000188554
},
"communicator.exchange": {
"total": 0.03348753300002727,
"count": 1,
"is_parallel": true,
"self": 0.03348753300002727
},
"steps_from_proto": {
"total": 0.0034089760000028946,
"count": 2,
"is_parallel": true,
"self": 0.0005874099998095517,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002821566000193343,
"count": 8,
"is_parallel": true,
"self": 0.002821566000193343
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.009957218000010926,
"count": 10,
"is_parallel": true,
"self": 0.0020800090006787286,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.007877208999332197,
"count": 40,
"is_parallel": true,
"self": 0.007877208999332197
}
}
},
"UnityEnvironment.step": {
"total": 1120.6511882770012,
"count": 68322,
"is_parallel": true,
"self": 69.13649921701744,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 45.612905000973626,
"count": 68322,
"is_parallel": true,
"self": 45.612905000973626
},
"communicator.exchange": {
"total": 788.4737009670148,
"count": 68322,
"is_parallel": true,
"self": 788.4737009670148
},
"steps_from_proto": {
"total": 217.42808309199535,
"count": 136644,
"is_parallel": true,
"self": 36.7278482309996,
"children": {
"_process_rank_one_or_two_observation": {
"total": 180.70023486099575,
"count": 546576,
"is_parallel": true,
"self": 180.70023486099575
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 782.0558061669711,
"count": 68323,
"self": 12.882832155030542,
"children": {
"process_trajectory": {
"total": 215.49916042294052,
"count": 68323,
"self": 214.983963919941,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5151965029995154,
"count": 2,
"self": 0.5151965029995154
}
}
},
"_update_policy": {
"total": 553.673813589,
"count": 96,
"self": 299.7714528620177,
"children": {
"TorchPOCAOptimizer.update": {
"total": 253.9023607269823,
"count": 5760,
"self": 253.9023607269823
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.130000423989259e-07,
"count": 1,
"self": 8.130000423989259e-07
},
"TrainerController._save_models": {
"total": 0.3090740139996342,
"count": 1,
"self": 0.0040466209998157865,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3050273929998184,
"count": 1,
"self": 0.3050273929998184
}
}
}
}
}
}
}