{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7701833248138428,
"min": 1.3719481229782104,
"max": 1.871724009513855,
"count": 821
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 48545.5078125,
"min": 22207.037109375,
"max": 56677.78515625,
"count": 821
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 78.80645161290323,
"min": 42.93693693693694,
"max": 117.11627906976744,
"count": 821
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19544.0,
"min": 10080.0,
"max": 21532.0,
"count": 821
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1525.340507142494,
"min": 1479.7606759542728,
"max": 1655.5722462772112,
"count": 821
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 189142.22288566927,
"min": 127944.68200545188,
"max": 361320.466831879,
"count": 821
},
"SoccerTwos.Step.mean": {
"value": 37899944.0,
"min": 29699920.0,
"max": 37899944.0,
"count": 821
},
"SoccerTwos.Step.sum": {
"value": 37899944.0,
"min": 29699920.0,
"max": 37899944.0,
"count": 821
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.058571405708789825,
"min": -0.13811704516410828,
"max": 0.10500992834568024,
"count": 821
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 7.262854099273682,
"min": -24.70503807067871,
"max": 14.176340103149414,
"count": 821
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.05869920179247856,
"min": -0.13186343014240265,
"max": 0.1037633940577507,
"count": 821
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 7.278700828552246,
"min": -25.713367462158203,
"max": 14.008058547973633,
"count": 821
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 821
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 821
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.17129032169618913,
"min": -0.40771612749304825,
"max": 0.2733866646176293,
"count": 821
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 21.239999890327454,
"min": -62.76160001754761,
"max": 43.67039978504181,
"count": 821
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.17129032169618913,
"min": -0.40771612749304825,
"max": 0.2733866646176293,
"count": 821
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 21.239999890327454,
"min": -62.76160001754761,
"max": 43.67039978504181,
"count": 821
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 821
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 821
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.02879449994652532,
"min": 0.024823492643675612,
"max": 0.0333783227474972,
"count": 199
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.02879449994652532,
"min": 0.024823492643675612,
"max": 0.0333783227474972,
"count": 199
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.07804813447097937,
"min": 0.06403212292740743,
"max": 0.10126510423918565,
"count": 199
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.07804813447097937,
"min": 0.06403212292740743,
"max": 0.10126510423918565,
"count": 199
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08070893287658691,
"min": 0.06631886282314857,
"max": 0.10316466639439265,
"count": 199
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08070893287658691,
"min": 0.06631886282314857,
"max": 0.10316466639439265,
"count": 199
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.001,
"min": 0.001,
"max": 0.001,
"count": 199
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.001,
"min": 0.001,
"max": 0.001,
"count": 199
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.23000000000000004,
"min": 0.23000000000000004,
"max": 0.23000000000000004,
"count": 199
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.23000000000000004,
"min": 0.23000000000000004,
"max": 0.23000000000000004,
"count": 199
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.006,
"min": 0.006,
"max": 0.006,
"count": 199
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.006,
"min": 0.006,
"max": 0.006,
"count": 199
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676556934",
"python_version": "3.9.16 (main, Jan 11 2023, 16:16:36) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\hecto\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=Pires_and_Lundberg --no-graphics --num-envs=3 --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1676579831"
},
"total": 22897.0813614,
"count": 1,
"self": 0.0077146000003267545,
"children": {
"run_training.setup": {
"total": 0.226788,
"count": 1,
"self": 0.226788
},
"TrainerController.start_learning": {
"total": 22896.8468588,
"count": 1,
"self": 16.652563701100007,
"children": {
"TrainerController._reset_env": {
"total": 15.79089869999327,
"count": 328,
"self": 15.79089869999327
},
"TrainerController.advance": {
"total": 22864.257761498906,
"count": 565444,
"self": 15.908553398705408,
"children": {
"env_step": {
"total": 3971.9426861001193,
"count": 565444,
"self": 1417.0439159997322,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2546.786615399115,
"count": 624712,
"self": 95.58230460170853,
"children": {
"TorchPolicy.evaluate": {
"total": 2451.2043107974064,
"count": 1158892,
"self": 2451.2043107974064
}
}
},
"workers": {
"total": 8.112154701272292,
"count": 565444,
"self": 0.0,
"children": {
"worker_root": {
"total": 68241.08778149853,
"count": 624258,
"is_parallel": true,
"self": 61950.040563398084,
"children": {
"steps_from_proto": {
"total": 1.4915279000327608,
"count": 1964,
"is_parallel": true,
"self": 0.30799659997581363,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1.1835313000569472,
"count": 7856,
"is_parallel": true,
"self": 1.1835313000569472
}
}
},
"UnityEnvironment.step": {
"total": 6289.555690200412,
"count": 624258,
"is_parallel": true,
"self": 342.1256913010302,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 265.4663002990906,
"count": 624258,
"is_parallel": true,
"self": 265.4663002990906
},
"communicator.exchange": {
"total": 4668.101448000436,
"count": 624258,
"is_parallel": true,
"self": 4668.101448000436
},
"steps_from_proto": {
"total": 1013.8622505998544,
"count": 1248516,
"is_parallel": true,
"self": 209.82015870185774,
"children": {
"_process_rank_one_or_two_observation": {
"total": 804.0420918979967,
"count": 4994064,
"is_parallel": true,
"self": 804.0420918979967
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 18876.40652200008,
"count": 565444,
"self": 117.15673470124966,
"children": {
"process_trajectory": {
"total": 2148.756217498842,
"count": 565444,
"self": 2146.9036823988413,
"children": {
"RLTrainer._checkpoint": {
"total": 1.8525351000007504,
"count": 16,
"self": 1.8525351000007504
}
}
},
"_update_policy": {
"total": 16610.49356979999,
"count": 200,
"self": 1185.1533127000675,
"children": {
"TorchPOCAOptimizer.update": {
"total": 15425.340257099922,
"count": 11999,
"self": 15425.340257099922
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5000005078036338e-06,
"count": 1,
"self": 1.5000005078036338e-06
},
"TrainerController._save_models": {
"total": 0.14563339999949676,
"count": 1,
"self": 0.02459169999929145,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12104170000020531,
"count": 1,
"self": 0.12104170000020531
}
}
}
}
}
}
}