{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.276836395263672,
"min": 3.276836395263672,
"max": 3.295710563659668,
"count": 5
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 99301.25,
"min": 49026.21875,
"max": 116492.6796875,
"count": 5
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 494.1,
"min": 402.53846153846155,
"max": 826.5,
"count": 5
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19764.0,
"min": 13904.0,
"max": 26448.0,
"count": 5
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1196.973240627599,
"min": 1196.1575554143353,
"max": 1200.0057398668362,
"count": 5
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 9575.785925020791,
"min": 4784.630221657341,
"max": 14400.068878402035,
"count": 5
},
"SoccerTwos.Step.mean": {
"value": 49481.0,
"min": 9240.0,
"max": 49481.0,
"count": 5
},
"SoccerTwos.Step.sum": {
"value": 49481.0,
"min": 9240.0,
"max": 49481.0,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.017755256965756416,
"min": 0.015964774414896965,
"max": 0.017755256965756416,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.35510513186454773,
"min": 0.19157728552818298,
"max": 0.4559280276298523,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.02090129442512989,
"min": 0.01710919477045536,
"max": 0.021780377253890038,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.4180258810520172,
"min": 0.20531034469604492,
"max": 0.5434314608573914,
"count": 5
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.09177999794483185,
"min": -0.183007694207705,
"max": -0.025988236946218154,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -1.835599958896637,
"min": -4.75820004940033,
"max": -0.4418000280857086,
"count": 5
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.09177999794483185,
"min": -0.183007694207705,
"max": -0.025988236946218154,
"count": 5
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -1.835599958896637,
"min": -4.75820004940033,
"max": -0.4418000280857086,
"count": 5
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.020048053671295443,
"min": 0.013956675225441965,
"max": 0.020048053671295443,
"count": 2
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.020048053671295443,
"min": 0.013956675225441965,
"max": 0.020048053671295443,
"count": 2
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.003968271519988775,
"min": 0.003968271519988775,
"max": 0.004949383491960665,
"count": 2
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.003968271519988775,
"min": 0.003968271519988775,
"max": 0.004949383491960665,
"count": 2
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.004005392733961344,
"min": 0.004005392733961344,
"max": 0.004720366552161673,
"count": 2
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.004005392733961344,
"min": 0.004005392733961344,
"max": 0.004720366552161673,
"count": 2
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701133238",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\nicos\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1701133626"
},
"total": 387.8955280000009,
"count": 1,
"self": 0.7812288999994053,
"children": {
"run_training.setup": {
"total": 0.31763080000382615,
"count": 1,
"self": 0.31763080000382615
},
"TrainerController.start_learning": {
"total": 386.7966682999977,
"count": 1,
"self": 0.27002319980965694,
"children": {
"TrainerController._reset_env": {
"total": 11.539739800005918,
"count": 3,
"self": 11.539739800005918
},
"TrainerController.advance": {
"total": 374.41705950017786,
"count": 4322,
"self": 0.310172600155056,
"children": {
"env_step": {
"total": 238.48301200011338,
"count": 4322,
"self": 186.4302416002538,
"children": {
"SubprocessEnvManager._take_step": {
"total": 51.87918779975735,
"count": 4322,
"self": 2.039363099473121,
"children": {
"TorchPolicy.evaluate": {
"total": 49.839824700284225,
"count": 8600,
"self": 49.839824700284225
}
}
},
"workers": {
"total": 0.17358260010223603,
"count": 4322,
"self": 0.0,
"children": {
"worker_root": {
"total": 374.8027044998162,
"count": 4322,
"is_parallel": true,
"self": 226.2311640998887,
"children": {
"steps_from_proto": {
"total": 0.016620599992165808,
"count": 6,
"is_parallel": true,
"self": 0.0030250999989220873,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.01359549999324372,
"count": 24,
"is_parallel": true,
"self": 0.01359549999324372
}
}
},
"UnityEnvironment.step": {
"total": 148.5549197999353,
"count": 4322,
"is_parallel": true,
"self": 8.05323410051642,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.7022263000835665,
"count": 4322,
"is_parallel": true,
"self": 6.7022263000835665
},
"communicator.exchange": {
"total": 106.79594819962222,
"count": 4322,
"is_parallel": true,
"self": 106.79594819962222
},
"steps_from_proto": {
"total": 27.003511199713103,
"count": 8644,
"is_parallel": true,
"self": 5.400286799726018,
"children": {
"_process_rank_one_or_two_observation": {
"total": 21.603224399987084,
"count": 34576,
"is_parallel": true,
"self": 21.603224399987084
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 135.62387489990942,
"count": 4322,
"self": 1.2072394003043883,
"children": {
"process_trajectory": {
"total": 24.746448899597453,
"count": 4322,
"self": 24.746448899597453
},
"_update_policy": {
"total": 109.67018660000758,
"count": 2,
"self": 14.201118899989524,
"children": {
"TorchPOCAOptimizer.update": {
"total": 95.46906770001806,
"count": 60,
"self": 95.46906770001806
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.4000040614046156e-06,
"count": 1,
"self": 3.4000040614046156e-06
},
"TrainerController._save_models": {
"total": 0.5698424000001978,
"count": 1,
"self": 0.022616599999309983,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5472258000008878,
"count": 1,
"self": 0.5472258000008878
}
}
}
}
}
}
}