{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.057386875152588,
"min": 2.047987222671509,
"max": 3.2957231998443604,
"count": 350
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 38645.953125,
"min": 1786.855712890625,
"max": 105463.140625,
"count": 350
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 75.6,
"min": 45.691588785046726,
"max": 999.0,
"count": 350
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19656.0,
"min": 3996.0,
"max": 29868.0,
"count": 350
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1571.1354581603364,
"min": 1200.4942694081915,
"max": 1589.1536115560243,
"count": 349
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 204247.60956084373,
"min": 2401.4985608882907,
"max": 315393.24823603884,
"count": 349
},
"SoccerTwos.Step.mean": {
"value": 3499930.0,
"min": 9034.0,
"max": 3499930.0,
"count": 350
},
"SoccerTwos.Step.sum": {
"value": 3499930.0,
"min": 9034.0,
"max": 3499930.0,
"count": 350
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.058072905987501144,
"min": -0.08106650412082672,
"max": 0.2032850682735443,
"count": 350
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -7.549477577209473,
"min": -13.132773399353027,
"max": 25.35916519165039,
"count": 350
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.05906663462519646,
"min": -0.09454289078712463,
"max": 0.20621255040168762,
"count": 350
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -7.678662300109863,
"min": -15.315948486328125,
"max": 25.47676658630371,
"count": 350
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 350
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 350
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.15642307767501243,
"min": -0.483433964117518,
"max": 0.4754314260823386,
"count": 350
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -20.335000097751617,
"min": -57.46280014514923,
"max": 58.32879984378815,
"count": 350
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.15642307767501243,
"min": -0.483433964117518,
"max": 0.4754314260823386,
"count": 350
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -20.335000097751617,
"min": -57.46280014514923,
"max": 58.32879984378815,
"count": 350
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 350
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 350
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017345637931430246,
"min": 0.011500285739991038,
"max": 0.023056052706670015,
"count": 168
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017345637931430246,
"min": 0.011500285739991038,
"max": 0.023056052706670015,
"count": 168
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09374586542447408,
"min": 0.0014979356166718173,
"max": 0.10577593992153804,
"count": 168
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09374586542447408,
"min": 0.0014979356166718173,
"max": 0.10577593992153804,
"count": 168
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09456879571080208,
"min": 0.001284183319685759,
"max": 0.10749490931630135,
"count": 168
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09456879571080208,
"min": 0.001284183319685759,
"max": 0.10749490931630135,
"count": 168
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 168
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 168
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 168
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 168
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 168
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 168
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689165746",
"python_version": "3.9.17 (main, Jul 5 2023, 20:47:11) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\winro\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.0+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1689180652"
},
"total": 14905.081364500002,
"count": 1,
"self": 0.9547617000007449,
"children": {
"run_training.setup": {
"total": 0.16417020000000004,
"count": 1,
"self": 0.16417020000000004
},
"TrainerController.start_learning": {
"total": 14903.962432600001,
"count": 1,
"self": 8.23313109989067,
"children": {
"TrainerController._reset_env": {
"total": 3.4831855999967347,
"count": 18,
"self": 3.4831855999967347
},
"TrainerController.advance": {
"total": 14892.027613300113,
"count": 238013,
"self": 8.806057099965983,
"children": {
"env_step": {
"total": 6161.397973400028,
"count": 238013,
"self": 4999.779219000455,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1156.4932071999556,
"count": 238013,
"self": 49.4476339000189,
"children": {
"TorchPolicy.evaluate": {
"total": 1107.0455732999367,
"count": 440930,
"self": 1107.0455732999367
}
}
},
"workers": {
"total": 5.125547199616794,
"count": 238013,
"self": 0.0,
"children": {
"worker_root": {
"total": 14888.307397000319,
"count": 238013,
"is_parallel": true,
"self": 10833.173858600108,
"children": {
"steps_from_proto": {
"total": 0.04477829999348826,
"count": 36,
"is_parallel": true,
"self": 0.009572499993747652,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.035205799999740606,
"count": 144,
"is_parallel": true,
"self": 0.035205799999740606
}
}
},
"UnityEnvironment.step": {
"total": 4055.0887601002164,
"count": 238013,
"is_parallel": true,
"self": 209.12338990089256,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 164.92480479982152,
"count": 238013,
"is_parallel": true,
"self": 164.92480479982152
},
"communicator.exchange": {
"total": 3038.6139916999327,
"count": 238013,
"is_parallel": true,
"self": 3038.6139916999327
},
"steps_from_proto": {
"total": 642.4265736995692,
"count": 476026,
"is_parallel": true,
"self": 137.1093524993388,
"children": {
"_process_rank_one_or_two_observation": {
"total": 505.3172212002304,
"count": 1904104,
"is_parallel": true,
"self": 505.3172212002304
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8721.82358280012,
"count": 238013,
"self": 54.33259610023924,
"children": {
"process_trajectory": {
"total": 1894.9583867998858,
"count": 238013,
"self": 1893.2951836998864,
"children": {
"RLTrainer._checkpoint": {
"total": 1.663203099999464,
"count": 7,
"self": 1.663203099999464
}
}
},
"_update_policy": {
"total": 6772.532599899995,
"count": 168,
"self": 740.2799666999108,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6032.252633200084,
"count": 5052,
"self": 6032.252633200084
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5000005078036338e-06,
"count": 1,
"self": 1.5000005078036338e-06
},
"TrainerController._save_models": {
"total": 0.21850110000013956,
"count": 1,
"self": 0.011879300000146031,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20662179999999353,
"count": 1,
"self": 0.20662179999999353
}
}
}
}
}
}
}