{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.72711181640625,
"min": 1.671557068824768,
"max": 3.295755386352539,
"count": 784
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 35371.25,
"min": 16988.4453125,
"max": 109470.1875,
"count": 784
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 65.25,
"min": 41.05084745762712,
"max": 999.0,
"count": 784
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19836.0,
"min": 11988.0,
"max": 25940.0,
"count": 784
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1602.1742045247136,
"min": 1192.3536352246779,
"max": 1607.6897156320813,
"count": 750
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 243530.47908775645,
"min": 2384.7072704493557,
"max": 377309.4046767551,
"count": 750
},
"SoccerTwos.Step.mean": {
"value": 7839848.0,
"min": 9990.0,
"max": 7839848.0,
"count": 784
},
"SoccerTwos.Step.sum": {
"value": 7839848.0,
"min": 9990.0,
"max": 7839848.0,
"count": 784
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.0023163086734712124,
"min": -0.10220928490161896,
"max": 0.16093367338180542,
"count": 784
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.352078914642334,
"min": -20.237438201904297,
"max": 23.81818389892578,
"count": 784
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0004817843437194824,
"min": -0.09853282570838928,
"max": 0.16025327146053314,
"count": 784
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.07323122024536133,
"min": -18.909896850585938,
"max": 23.717483520507812,
"count": 784
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 784
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 784
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.09625526399988878,
"min": -0.7386344835675996,
"max": 0.5267655150643711,
"count": 784
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 14.630800127983093,
"min": -65.3808000087738,
"max": 58.02219998836517,
"count": 784
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.09625526399988878,
"min": -0.7386344835675996,
"max": 0.5267655150643711,
"count": 784
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 14.630800127983093,
"min": -65.3808000087738,
"max": 58.02219998836517,
"count": 784
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 784
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 784
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01764298015429328,
"min": 0.010950238690323506,
"max": 0.022482394275721164,
"count": 377
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01764298015429328,
"min": 0.010950238690323506,
"max": 0.022482394275721164,
"count": 377
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09807224869728089,
"min": 4.585030543087972e-07,
"max": 0.1255581816037496,
"count": 377
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09807224869728089,
"min": 4.585030543087972e-07,
"max": 0.1255581816037496,
"count": 377
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09944819311300913,
"min": 4.424588117520519e-07,
"max": 0.1275894212226073,
"count": 377
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09944819311300913,
"min": 4.424588117520519e-07,
"max": 0.1275894212226073,
"count": 377
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 377
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 377
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 377
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 377
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 377
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 377
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710442512",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\amber\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1710454283"
},
"total": 11770.568508300013,
"count": 1,
"self": 0.1466140000266023,
"children": {
"run_training.setup": {
"total": 0.04752399999415502,
"count": 1,
"self": 0.04752399999415502
},
"TrainerController.start_learning": {
"total": 11770.374370299993,
"count": 1,
"self": 7.355365001145401,
"children": {
"TrainerController._reset_env": {
"total": 4.624762799954624,
"count": 40,
"self": 4.624762799954624
},
"TrainerController.advance": {
"total": 11758.247405398884,
"count": 534775,
"self": 7.542019303058623,
"children": {
"env_step": {
"total": 5282.786133503381,
"count": 534775,
"self": 4117.349457297823,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1160.7886641996301,
"count": 534775,
"self": 46.50264438748127,
"children": {
"TorchPolicy.evaluate": {
"total": 1114.2860198121489,
"count": 991630,
"self": 1114.2860198121489
}
}
},
"workers": {
"total": 4.64801200592774,
"count": 534774,
"self": 0.0,
"children": {
"worker_root": {
"total": 11757.757449391793,
"count": 534774,
"is_parallel": true,
"self": 8455.259314197727,
"children": {
"steps_from_proto": {
"total": 0.040848600037861615,
"count": 80,
"is_parallel": true,
"self": 0.008222899836255237,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03262570020160638,
"count": 320,
"is_parallel": true,
"self": 0.03262570020160638
}
}
},
"UnityEnvironment.step": {
"total": 3302.457286594028,
"count": 534774,
"is_parallel": true,
"self": 156.80489337674226,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 115.85295320823207,
"count": 534774,
"is_parallel": true,
"self": 115.85295320823207
},
"communicator.exchange": {
"total": 2535.0331652071036,
"count": 534774,
"is_parallel": true,
"self": 2535.0331652071036
},
"steps_from_proto": {
"total": 494.7662748019502,
"count": 1069548,
"is_parallel": true,
"self": 97.41217780351872,
"children": {
"_process_rank_one_or_two_observation": {
"total": 397.3540969984315,
"count": 4278192,
"is_parallel": true,
"self": 397.3540969984315
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 6467.9192525924445,
"count": 534774,
"self": 57.76235199844814,
"children": {
"process_trajectory": {
"total": 1269.3616613940394,
"count": 534774,
"self": 1268.0420684940618,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3195928999775788,
"count": 15,
"self": 1.3195928999775788
}
}
},
"_update_policy": {
"total": 5140.795239199957,
"count": 377,
"self": 706.3808342998964,
"children": {
"TorchPOCAOptimizer.update": {
"total": 4434.414404900061,
"count": 11316,
"self": 4434.414404900061
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.00006091594696e-07,
"count": 1,
"self": 8.00006091594696e-07
},
"TrainerController._save_models": {
"total": 0.14683630000217818,
"count": 1,
"self": 0.0026784000074258074,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14415789999475237,
"count": 1,
"self": 0.14415789999475237
}
}
}
}
}
}
}