{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.655691385269165,
"min": 1.604853630065918,
"max": 3.295750617980957,
"count": 1173
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 31630.328125,
"min": 20975.4140625,
"max": 142921.859375,
"count": 1173
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 61.02469135802469,
"min": 37.9453125,
"max": 999.0,
"count": 1173
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19772.0,
"min": 15044.0,
"max": 24656.0,
"count": 1173
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1681.859608738653,
"min": 1198.2212589071537,
"max": 1697.1514042097335,
"count": 1139
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 272461.2566156618,
"min": 2397.7113664165086,
"max": 420592.54139021994,
"count": 1139
},
"SoccerTwos.Step.mean": {
"value": 11729962.0,
"min": 9346.0,
"max": 11729962.0,
"count": 1173
},
"SoccerTwos.Step.sum": {
"value": 11729962.0,
"min": 9346.0,
"max": 11729962.0,
"count": 1173
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.034282613545656204,
"min": -0.13301002979278564,
"max": 0.24164287745952606,
"count": 1173
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 5.553783416748047,
"min": -22.187185287475586,
"max": 33.346717834472656,
"count": 1173
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.03710721433162689,
"min": -0.13044849038124084,
"max": 0.23275840282440186,
"count": 1173
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 6.011368751525879,
"min": -22.110445022583008,
"max": 32.12065887451172,
"count": 1173
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1173
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1173
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.24103456882782925,
"min": -0.6153846153846154,
"max": 0.5917623189912327,
"count": 1173
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 39.04760015010834,
"min": -60.84120035171509,
"max": 81.6632000207901,
"count": 1173
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.24103456882782925,
"min": -0.6153846153846154,
"max": 0.5917623189912327,
"count": 1173
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 39.04760015010834,
"min": -60.84120035171509,
"max": 81.6632000207901,
"count": 1173
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1173
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1173
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.012525795083274716,
"min": 0.010618784060837546,
"max": 0.024950614649181566,
"count": 565
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.012525795083274716,
"min": 0.010618784060837546,
"max": 0.024950614649181566,
"count": 565
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11439260989427566,
"min": 9.553729917873473e-06,
"max": 0.12423499897122384,
"count": 565
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11439260989427566,
"min": 9.553729917873473e-06,
"max": 0.12423499897122384,
"count": 565
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11647900144259135,
"min": 1.1408822880791073e-05,
"max": 0.12736859222253163,
"count": 565
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11647900144259135,
"min": 1.1408822880791073e-05,
"max": 0.12736859222253163,
"count": 565
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 565
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 565
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 565
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 565
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 565
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 565
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696097243",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\F:\\Anaconda3\\envs\\protonh-rl1\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1696129105"
},
"total": 31862.466951500002,
"count": 1,
"self": 2.511634000002232,
"children": {
"run_training.setup": {
"total": 0.11477240000000055,
"count": 1,
"self": 0.11477240000000055
},
"TrainerController.start_learning": {
"total": 31859.8405451,
"count": 1,
"self": 17.501605399760592,
"children": {
"TrainerController._reset_env": {
"total": 7.557385799996531,
"count": 40,
"self": 7.557385799996531
},
"TrainerController.advance": {
"total": 31834.661665400246,
"count": 800682,
"self": 17.35468059842242,
"children": {
"env_step": {
"total": 12246.201269702273,
"count": 800682,
"self": 9710.239232002055,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2524.520591000684,
"count": 800682,
"self": 93.15535820083278,
"children": {
"TorchPolicy.evaluate": {
"total": 2431.365232799851,
"count": 1476658,
"self": 2431.365232799851
}
}
},
"workers": {
"total": 11.441446699534,
"count": 800681,
"self": 0.0,
"children": {
"worker_root": {
"total": 31829.852516000356,
"count": 800681,
"is_parallel": true,
"self": 24080.85557060062,
"children": {
"steps_from_proto": {
"total": 0.05577380000377907,
"count": 80,
"is_parallel": true,
"self": 0.012069000011796227,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.043704799991982846,
"count": 320,
"is_parallel": true,
"self": 0.043704799991982846
}
}
},
"UnityEnvironment.step": {
"total": 7748.941171599733,
"count": 800681,
"is_parallel": true,
"self": 375.85933039860447,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 333.253516900041,
"count": 800681,
"is_parallel": true,
"self": 333.253516900041
},
"communicator.exchange": {
"total": 5818.657158099784,
"count": 800681,
"is_parallel": true,
"self": 5818.657158099784
},
"steps_from_proto": {
"total": 1221.171166201303,
"count": 1601362,
"is_parallel": true,
"self": 260.5219661058833,
"children": {
"_process_rank_one_or_two_observation": {
"total": 960.6492000954198,
"count": 6405448,
"is_parallel": true,
"self": 960.6492000954198
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 19571.10571509955,
"count": 800681,
"self": 133.16959149865943,
"children": {
"process_trajectory": {
"total": 3299.218758000864,
"count": 800681,
"self": 3291.8554653008587,
"children": {
"RLTrainer._checkpoint": {
"total": 7.363292700005559,
"count": 23,
"self": 7.363292700005559
}
}
},
"_update_policy": {
"total": 16138.717365600029,
"count": 565,
"self": 1609.4895082000457,
"children": {
"TorchPOCAOptimizer.update": {
"total": 14529.227857399983,
"count": 16950,
"self": 14529.227857399983
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3999997463542968e-06,
"count": 1,
"self": 1.3999997463542968e-06
},
"TrainerController._save_models": {
"total": 0.11988709999786806,
"count": 1,
"self": 0.0069741999977850355,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11291290000008303,
"count": 1,
"self": 0.11291290000008303
}
}
}
}
}
}
}