{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6055675745010376,
"min": 1.4875247478485107,
"max": 3.295811176300049,
"count": 2420
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 33652.6953125,
"min": 527.3297119140625,
"max": 157908.453125,
"count": 2420
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 66.5945945945946,
"min": 37.9765625,
"max": 999.0,
"count": 2420
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19712.0,
"min": 9044.0,
"max": 31900.0,
"count": 2420
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1686.1153239654511,
"min": 1197.2849400558503,
"max": 1750.715515078577,
"count": 2319
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 249545.06794688676,
"min": 2394.5698801117005,
"max": 426092.6676963066,
"count": 2319
},
"SoccerTwos.Step.mean": {
"value": 24199796.0,
"min": 9734.0,
"max": 24199796.0,
"count": 2420
},
"SoccerTwos.Step.sum": {
"value": 24199796.0,
"min": 9734.0,
"max": 24199796.0,
"count": 2420
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.006833486258983612,
"min": -0.1149551272392273,
"max": 0.1482248604297638,
"count": 2420
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.0181894302368164,
"min": -25.096607208251953,
"max": 27.56982421875,
"count": 2420
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.004428248852491379,
"min": -0.11781862378120422,
"max": 0.15132680535316467,
"count": 2420
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.6598090529441833,
"min": -25.269559860229492,
"max": 28.146785736083984,
"count": 2420
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 2420
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 2420
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.0290657701908342,
"min": -0.5714285714285714,
"max": 0.3898588173529681,
"count": 2420
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -4.330799758434296,
"min": -72.610799908638,
"max": 67.44040048122406,
"count": 2420
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.0290657701908342,
"min": -0.5714285714285714,
"max": 0.3898588173529681,
"count": 2420
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -4.330799758434296,
"min": -72.610799908638,
"max": 67.44040048122406,
"count": 2420
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2420
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2420
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.020498156530447887,
"min": 0.008383814156210671,
"max": 0.024075864837504922,
"count": 1085
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.020498156530447887,
"min": 0.008383814156210671,
"max": 0.024075864837504922,
"count": 1085
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.1081407718360424,
"min": 2.2772933334636035e-07,
"max": 0.13698544303576152,
"count": 1085
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.1081407718360424,
"min": 2.2772933334636035e-07,
"max": 0.13698544303576152,
"count": 1085
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10900740101933479,
"min": 3.7239703374325475e-07,
"max": 0.13833070372541745,
"count": 1085
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10900740101933479,
"min": 3.7239703374325475e-07,
"max": 0.13833070372541745,
"count": 1085
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.00015,
"min": 0.00015,
"max": 0.00015,
"count": 1085
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.00015,
"min": 0.00015,
"max": 0.00015,
"count": 1085
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 1085
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 1085
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 1085
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 1085
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691679997",
"python_version": "3.9.3 (tags/v3.9.3:e723086, Apr 2 2021, 11:35:20) [MSC v.1928 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\sepp4\\Desktop\\ml-agents\\env\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1691741226"
},
"total": 61228.198051199994,
"count": 1,
"self": 0.2288713999951142,
"children": {
"run_training.setup": {
"total": 0.0902630999999996,
"count": 1,
"self": 0.0902630999999996
},
"TrainerController.start_learning": {
"total": 61227.8789167,
"count": 1,
"self": 35.02719569855253,
"children": {
"TrainerController._reset_env": {
"total": 5.856344300003699,
"count": 121,
"self": 5.856344300003699
},
"TrainerController.advance": {
"total": 61186.821604001445,
"count": 1664618,
"self": 33.55510029870493,
"children": {
"env_step": {
"total": 27146.106987004634,
"count": 1664618,
"self": 21252.036833308906,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5872.404264497631,
"count": 1664618,
"self": 186.36553130181892,
"children": {
"TorchPolicy.evaluate": {
"total": 5686.038733195812,
"count": 3055234,
"self": 5686.038733195812
}
}
},
"workers": {
"total": 21.665889198097418,
"count": 1664618,
"self": 0.0,
"children": {
"worker_root": {
"total": 61173.88716729948,
"count": 1664618,
"is_parallel": true,
"self": 43696.29633110274,
"children": {
"steps_from_proto": {
"total": 0.18818690005060423,
"count": 242,
"is_parallel": true,
"self": 0.03884530000332953,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.1493416000472747,
"count": 968,
"is_parallel": true,
"self": 0.1493416000472747
}
}
},
"UnityEnvironment.step": {
"total": 17477.402649296688,
"count": 1664618,
"is_parallel": true,
"self": 802.4415389917267,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 665.0511958990663,
"count": 1664618,
"is_parallel": true,
"self": 665.0511958990663
},
"communicator.exchange": {
"total": 13378.97207830305,
"count": 1664618,
"is_parallel": true,
"self": 13378.97207830305
},
"steps_from_proto": {
"total": 2630.9378361028457,
"count": 3329236,
"is_parallel": true,
"self": 557.7289859072744,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2073.2088501955714,
"count": 13316944,
"is_parallel": true,
"self": 2073.2088501955714
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 34007.159516698106,
"count": 1664617,
"self": 256.4837779951704,
"children": {
"process_trajectory": {
"total": 5860.056134002857,
"count": 1664617,
"self": 5853.174722902845,
"children": {
"RLTrainer._checkpoint": {
"total": 6.881411100012201,
"count": 48,
"self": 6.881411100012201
}
}
},
"_update_policy": {
"total": 27890.61960470008,
"count": 1085,
"self": 3063.543814500452,
"children": {
"TorchPOCAOptimizer.update": {
"total": 24827.07579019963,
"count": 32559,
"self": 24827.07579019963
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2000018614344299e-06,
"count": 1,
"self": 1.2000018614344299e-06
},
"TrainerController._save_models": {
"total": 0.1737714999981108,
"count": 1,
"self": 0.007878600001276936,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16589289999683388,
"count": 1,
"self": 0.16589289999683388
}
}
}
}
}
}
}