{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.4285483360290527,
"min": 2.4123215675354004,
"max": 3.295747995376587,
"count": 572
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 46938.984375,
"min": 18835.41796875,
"max": 148510.625,
"count": 572
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 53.03333333333333,
"min": 44.24770642201835,
"max": 999.0,
"count": 572
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19092.0,
"min": 16184.0,
"max": 23856.0,
"count": 572
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1459.89320978207,
"min": 1182.3895751875943,
"max": 1463.7659452196,
"count": 414
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 262780.7777607726,
"min": 2365.104459575471,
"max": 318731.5104809692,
"count": 414
},
"SoccerTwos.Step.mean": {
"value": 5719955.0,
"min": 9944.0,
"max": 5719955.0,
"count": 572
},
"SoccerTwos.Step.sum": {
"value": 5719955.0,
"min": 9944.0,
"max": 5719955.0,
"count": 572
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0042080082930624485,
"min": -0.060339827090501785,
"max": 0.17207317054271698,
"count": 572
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.761649489402771,
"min": -10.137090682983398,
"max": 28.5672664642334,
"count": 572
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.013848502188920975,
"min": -0.06162046268582344,
"max": 0.1689547300338745,
"count": 572
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.5065789222717285,
"min": -10.352237701416016,
"max": 28.553348541259766,
"count": 572
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 572
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 572
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.03215469708100208,
"min": -0.5,
"max": 0.5003499984741211,
"count": 572
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 5.820000171661377,
"min": -37.40279978513718,
"max": 40.02560031414032,
"count": 572
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.03215469708100208,
"min": -0.5,
"max": 0.5003499984741211,
"count": 572
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 5.820000171661377,
"min": -37.40279978513718,
"max": 40.02560031414032,
"count": 572
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 572
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 572
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.013975370206753723,
"min": 0.01113784731811999,
"max": 0.02317632376604403,
"count": 267
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.013975370206753723,
"min": 0.01113784731811999,
"max": 0.02317632376604403,
"count": 267
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10773070926467578,
"min": 6.605348067031021e-08,
"max": 0.10773070926467578,
"count": 267
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10773070926467578,
"min": 6.605348067031021e-08,
"max": 0.10773070926467578,
"count": 267
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.1098634255429109,
"min": 6.81121953505226e-08,
"max": 0.1098634255429109,
"count": 267
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.1098634255429109,
"min": 6.81121953505226e-08,
"max": 0.1098634255429109,
"count": 267
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 267
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 267
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 267
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 267
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 267
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 267
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701112940",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/gerard/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701128321"
},
"total": 15380.66373492,
"count": 1,
"self": 0.03863696200096456,
"children": {
"run_training.setup": {
"total": 0.01112268499991842,
"count": 1,
"self": 0.01112268499991842
},
"TrainerController.start_learning": {
"total": 15380.613975272998,
"count": 1,
"self": 8.447987040233784,
"children": {
"TrainerController._reset_env": {
"total": 2.77713225499906,
"count": 29,
"self": 2.77713225499906
},
"TrainerController.advance": {
"total": 15369.187369838766,
"count": 376159,
"self": 8.412951710271955,
"children": {
"env_step": {
"total": 7363.122731359546,
"count": 376159,
"self": 5943.645075474607,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1413.947692581413,
"count": 376159,
"self": 46.85850683716876,
"children": {
"TorchPolicy.evaluate": {
"total": 1367.0891857442443,
"count": 732332,
"self": 1367.0891857442443
}
}
},
"workers": {
"total": 5.529963303525619,
"count": 376159,
"self": 0.0,
"children": {
"worker_root": {
"total": 15348.942659730743,
"count": 376159,
"is_parallel": true,
"self": 10437.356349070564,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025318650000372145,
"count": 2,
"is_parallel": true,
"self": 0.0011279999998805579,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014038650001566566,
"count": 8,
"is_parallel": true,
"self": 0.0014038650001566566
}
}
},
"UnityEnvironment.step": {
"total": 0.027527491000000737,
"count": 1,
"is_parallel": true,
"self": 0.0008405670002957777,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006389699999544973,
"count": 1,
"is_parallel": true,
"self": 0.0006389699999544973
},
"communicator.exchange": {
"total": 0.023571671999889077,
"count": 1,
"is_parallel": true,
"self": 0.023571671999889077
},
"steps_from_proto": {
"total": 0.002476281999861385,
"count": 2,
"is_parallel": true,
"self": 0.0004359760002898838,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002040305999571501,
"count": 8,
"is_parallel": true,
"self": 0.002040305999571501
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4911.523292744176,
"count": 376158,
"is_parallel": true,
"self": 300.24126463963603,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 215.83150862467664,
"count": 376158,
"is_parallel": true,
"self": 215.83150862467664
},
"communicator.exchange": {
"total": 3508.5953525422265,
"count": 376158,
"is_parallel": true,
"self": 3508.5953525422265
},
"steps_from_proto": {
"total": 886.8551669376368,
"count": 752316,
"is_parallel": true,
"self": 157.72267335778747,
"children": {
"_process_rank_one_or_two_observation": {
"total": 729.1324935798493,
"count": 3009264,
"is_parallel": true,
"self": 729.1324935798493
}
}
}
}
},
"steps_from_proto": {
"total": 0.06301791600344586,
"count": 56,
"is_parallel": true,
"self": 0.011793493995810422,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.05122442200763544,
"count": 224,
"is_parallel": true,
"self": 0.05122442200763544
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 7997.651686768947,
"count": 376159,
"self": 66.63243717227851,
"children": {
"process_trajectory": {
"total": 989.4638971836682,
"count": 376159,
"self": 987.3272013546682,
"children": {
"RLTrainer._checkpoint": {
"total": 2.1366958290000184,
"count": 11,
"self": 2.1366958290000184
}
}
},
"_update_policy": {
"total": 6941.555352413,
"count": 268,
"self": 687.9706895610225,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6253.584662851978,
"count": 8036,
"self": 6253.584662851978
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2359996617306024e-06,
"count": 1,
"self": 1.2359996617306024e-06
},
"TrainerController._save_models": {
"total": 0.2014849029983452,
"count": 1,
"self": 0.002181223000661703,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1993036799976835,
"count": 1,
"self": 0.1993036799976835
}
}
}
}
}
}
}