{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.259511709213257,
"min": 3.259511709213257,
"max": 3.295741319656372,
"count": 16
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 90953.4140625,
"min": 28942.8203125,
"max": 106412.890625,
"count": 16
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 791.5714285714286,
"min": 521.8,
"max": 999.0,
"count": 16
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 22164.0,
"min": 16944.0,
"max": 22372.0,
"count": 16
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1191.6922331469073,
"min": 1191.6922331469073,
"max": 1197.6300552796083,
"count": 13
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 9533.537865175258,
"min": 2384.9430510499333,
"max": 19152.344780022133,
"count": 13
},
"SoccerTwos.Step.mean": {
"value": 159596.0,
"min": 9479.0,
"max": 159596.0,
"count": 16
},
"SoccerTwos.Step.sum": {
"value": 159596.0,
"min": 9479.0,
"max": 159596.0,
"count": 16
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.013069055043160915,
"min": 0.013069055043160915,
"max": 0.10185398161411285,
"count": 16
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.16989772021770477,
"min": 0.16989772021770477,
"max": 1.9351274967193604,
"count": 16
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.012426543980836868,
"min": 0.012426543980836868,
"max": 0.10180485993623734,
"count": 16
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.161545068025589,
"min": 0.161545068025589,
"max": 1.934121012687683,
"count": 16
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 16
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 16
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.44252307598407453,
"min": -0.5370736812290392,
"max": 0.03303999900817871,
"count": 16
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -5.752799987792969,
"min": -10.204399943351746,
"max": 0.3303999900817871,
"count": 16
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.44252307598407453,
"min": -0.5370736812290392,
"max": 0.03303999900817871,
"count": 16
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -5.752799987792969,
"min": -10.204399943351746,
"max": 0.3303999900817871,
"count": 16
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01770546785288995,
"min": 0.012537877772532131,
"max": 0.021505475727220377,
"count": 7
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01770546785288995,
"min": 0.012537877772532131,
"max": 0.021505475727220377,
"count": 7
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0023161553932974734,
"min": 0.0007440834905234321,
"max": 0.008842417830601334,
"count": 7
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0023161553932974734,
"min": 0.0007440834905234321,
"max": 0.008842417830601334,
"count": 7
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.002326815548197677,
"min": 0.0007457728771745072,
"max": 0.00819348266037802,
"count": 7
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.002326815548197677,
"min": 0.0007457728771745072,
"max": 0.00819348266037802,
"count": 7
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 7
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 7
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 7
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 7
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 7
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 7
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680387416",
"python_version": "3.9.16 (main, Mar 8 2023, 04:29:24) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/clementthiriet/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1680388079"
},
"total": 663.4638185,
"count": 1,
"self": 0.12192162500002723,
"children": {
"run_training.setup": {
"total": 0.060317459000000184,
"count": 1,
"self": 0.060317459000000184
},
"TrainerController.start_learning": {
"total": 663.281579416,
"count": 1,
"self": 0.1281013870013794,
"children": {
"TrainerController._reset_env": {
"total": 2.4665004999999987,
"count": 1,
"self": 2.4665004999999987
},
"TrainerController.advance": {
"total": 660.5726466129985,
"count": 10992,
"self": 0.11882123999976102,
"children": {
"env_step": {
"total": 548.1723853380014,
"count": 10992,
"self": 528.0303609479998,
"children": {
"SubprocessEnvManager._take_step": {
"total": 20.04637094500028,
"count": 10992,
"self": 0.5218304509988592,
"children": {
"TorchPolicy.evaluate": {
"total": 19.52454049400142,
"count": 21828,
"self": 19.52454049400142
}
}
},
"workers": {
"total": 0.09565344500122208,
"count": 10991,
"self": 0.0,
"children": {
"worker_root": {
"total": 660.4438563540028,
"count": 10991,
"is_parallel": true,
"self": 151.742179689002,
"children": {
"steps_from_proto": {
"total": 0.001600126000000035,
"count": 2,
"is_parallel": true,
"self": 0.0002107099999992812,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013894160000007538,
"count": 8,
"is_parallel": true,
"self": 0.0013894160000007538
}
}
},
"UnityEnvironment.step": {
"total": 508.7000765390008,
"count": 10991,
"is_parallel": true,
"self": 1.4707369160045118,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.72545943800057,
"count": 10991,
"is_parallel": true,
"self": 8.72545943800057
},
"communicator.exchange": {
"total": 478.93568528000037,
"count": 10991,
"is_parallel": true,
"self": 478.93568528000037
},
"steps_from_proto": {
"total": 19.568194904995302,
"count": 21982,
"is_parallel": true,
"self": 2.135017521992289,
"children": {
"_process_rank_one_or_two_observation": {
"total": 17.433177383003013,
"count": 87928,
"is_parallel": true,
"self": 17.433177383003013
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 112.28144003499737,
"count": 10991,
"self": 0.9418736109951027,
"children": {
"process_trajectory": {
"total": 18.29796225700239,
"count": 10991,
"self": 18.29796225700239
},
"_update_policy": {
"total": 93.04160416699987,
"count": 7,
"self": 11.6692971599999,
"children": {
"TorchPOCAOptimizer.update": {
"total": 81.37230700699997,
"count": 210,
"self": 81.37230700699997
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.24999984211172e-07,
"count": 1,
"self": 6.24999984211172e-07
},
"TrainerController._save_models": {
"total": 0.11433029100010117,
"count": 1,
"self": 0.0006430830001136201,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11368720799998755,
"count": 1,
"self": 0.11368720799998755
}
}
}
}
}
}
}