poca-SoccerTwos / run_logs / timers.json
milotix's picture
First Push
d1d72d8 verified
raw
history blame
15.6 kB
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1268997192382812,
"min": 3.1217541694641113,
"max": 3.295693874359131,
"count": 139
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 43826.625,
"min": 23809.28515625,
"max": 122828.046875,
"count": 139
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 775.6666666666666,
"min": 377.5,
"max": 999.0,
"count": 139
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 18616.0,
"min": 15216.0,
"max": 24432.0,
"count": 139
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1204.7356625024615,
"min": 1195.2211859297265,
"max": 1205.673527009781,
"count": 112
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4818.942650009846,
"min": 2390.442371859453,
"max": 16740.325644081386,
"count": 112
},
"SoccerTwos.Step.mean": {
"value": 1389016.0,
"min": 9232.0,
"max": 1389016.0,
"count": 139
},
"SoccerTwos.Step.sum": {
"value": 1389016.0,
"min": 9232.0,
"max": 1389016.0,
"count": 139
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.005245966371148825,
"min": -0.0989176332950592,
"max": 0.011961364187300205,
"count": 139
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.06295159459114075,
"min": -1.7785413265228271,
"max": 0.1554977297782898,
"count": 139
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.006490865256637335,
"min": -0.0968659445643425,
"max": 0.010837414301931858,
"count": 139
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.07789038121700287,
"min": -1.5643390417099,
"max": 0.14088638126850128,
"count": 139
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 139
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 139
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.08589999998609225,
"min": -0.5542222222106324,
"max": 0.34406666954358417,
"count": 139
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -1.030799999833107,
"min": -9.975999999791384,
"max": 5.161000043153763,
"count": 139
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.08589999998609225,
"min": -0.5542222222106324,
"max": 0.34406666954358417,
"count": 139
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -1.030799999833107,
"min": -9.975999999791384,
"max": 5.161000043153763,
"count": 139
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 139
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 139
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014213207856907198,
"min": 0.012458737885269026,
"max": 0.02325381284269194,
"count": 64
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014213207856907198,
"min": 0.012458737885269026,
"max": 0.02325381284269194,
"count": 64
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0028983446303755046,
"min": 1.6160732532929007e-05,
"max": 0.006475627468898892,
"count": 64
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0028983446303755046,
"min": 1.6160732532929007e-05,
"max": 0.006475627468898892,
"count": 64
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0030047286689902346,
"min": 1.6385227778907088e-05,
"max": 0.006480514382322629,
"count": 64
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0030047286689902346,
"min": 1.6385227778907088e-05,
"max": 0.006480514382322629,
"count": 64
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 64
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 64
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 64
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 64
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 64
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 64
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710863605",
"python_version": "3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:34:57) [MSC v.1936 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\milot\\.conda\\envs\\rl-2v2\\Scripts\\mlagents-learn config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1710865438"
},
"total": 1833.1905421999982,
"count": 1,
"self": 0.06785500003024936,
"children": {
"run_training.setup": {
"total": 0.05275539995636791,
"count": 1,
"self": 0.05275539995636791
},
"TrainerController.start_learning": {
"total": 1833.0699318000115,
"count": 1,
"self": 1.30894049606286,
"children": {
"TrainerController._reset_env": {
"total": 3.5227953002322465,
"count": 7,
"self": 3.5227953002322465
},
"TrainerController.advance": {
"total": 1828.1491721038474,
"count": 90656,
"self": 1.3846663229633123,
"children": {
"env_step": {
"total": 898.2950262710219,
"count": 90656,
"self": 681.8905553456862,
"children": {
"SubprocessEnvManager._take_step": {
"total": 215.5549817046849,
"count": 90656,
"self": 8.214683901169337,
"children": {
"TorchPolicy.evaluate": {
"total": 207.34029780351557,
"count": 179870,
"self": 207.34029780351557
}
}
},
"workers": {
"total": 0.8494892206508666,
"count": 90656,
"self": 0.0,
"children": {
"worker_root": {
"total": 1827.5537074055756,
"count": 90656,
"is_parallel": true,
"self": 1292.482827128726,
"children": {
"steps_from_proto": {
"total": 0.0067440998973324895,
"count": 14,
"is_parallel": true,
"self": 0.0013416995061561465,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005402400391176343,
"count": 56,
"is_parallel": true,
"self": 0.005402400391176343
}
}
},
"UnityEnvironment.step": {
"total": 535.0641361769522,
"count": 90656,
"is_parallel": true,
"self": 27.684915057965554,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 20.183894310728647,
"count": 90656,
"is_parallel": true,
"self": 20.183894310728647
},
"communicator.exchange": {
"total": 403.7165460069664,
"count": 90656,
"is_parallel": true,
"self": 403.7165460069664
},
"steps_from_proto": {
"total": 83.47878080129158,
"count": 181312,
"is_parallel": true,
"self": 16.029779013362713,
"children": {
"_process_rank_one_or_two_observation": {
"total": 67.44900178792886,
"count": 725248,
"is_parallel": true,
"self": 67.44900178792886
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 928.4694795098621,
"count": 90656,
"self": 10.494383209501393,
"children": {
"process_trajectory": {
"total": 134.1124574005371,
"count": 90656,
"self": 133.935653400491,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1768040000461042,
"count": 2,
"self": 0.1768040000461042
}
}
},
"_update_policy": {
"total": 783.8626388998237,
"count": 65,
"self": 115.99846580310259,
"children": {
"TorchPOCAOptimizer.update": {
"total": 667.8641730967211,
"count": 1924,
"self": 667.8641730967211
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.998896762728691e-07,
"count": 1,
"self": 7.998896762728691e-07
},
"TrainerController._save_models": {
"total": 0.08902309997938573,
"count": 1,
"self": 0.007825999986380339,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0811970999930054,
"count": 1,
"self": 0.0811970999930054
}
}
}
}
}
}
}