{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.183866262435913,
"min": 3.1576008796691895,
"max": 3.2451469898223877,
"count": 80
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 147731.390625,
"min": 32274.576171875,
"max": 147731.390625,
"count": 80
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 528.2,
"min": 389.6666666666667,
"max": 999.0,
"count": 80
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 21128.0,
"min": 4676.0,
"max": 31968.0,
"count": 80
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 82
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 82
},
"SoccerTwos.Step.mean": {
"value": 1309252.0,
"min": 509914.0,
"max": 1309252.0,
"count": 81
},
"SoccerTwos.Step.sum": {
"value": 1309252.0,
"min": 509914.0,
"max": 1309252.0,
"count": 81
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.007248410955071449,
"min": -0.016149107366800308,
"max": 0.007384084165096283,
"count": 81
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.13771981000900269,
"min": -0.16149106621742249,
"max": 0.07860517501831055,
"count": 81
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.00707031786441803,
"min": -0.016183573752641678,
"max": 0.007944553159177303,
"count": 81
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.13433603942394257,
"min": -0.16183573007583618,
"max": 0.07944553345441818,
"count": 81
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 81
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 81
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.38606666773557663,
"max": 0.24750769138336182,
"count": 81
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -4.6328000128269196,
"max": 3.2175999879837036,
"count": 81
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.38606666773557663,
"max": 0.24750769138336182,
"count": 81
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -4.6328000128269196,
"max": 3.2175999879837036,
"count": 81
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1211.7637330361702,
"min": 1202.8442763085673,
"max": 1211.7637330361702,
"count": 42
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2423.5274660723403,
"min": 2405.6885526171345,
"max": 7247.163809551618,
"count": 42
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016080106038134544,
"min": 0.010803807491902262,
"max": 0.019793145587512603,
"count": 37
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016080106038134544,
"min": 0.010803807491902262,
"max": 0.019793145587512603,
"count": 37
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 7.310748022367382e-07,
"min": 5.97159252417138e-07,
"max": 0.002651917798599849,
"count": 37
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 7.310748022367382e-07,
"min": 5.97159252417138e-07,
"max": 0.002651917798599849,
"count": 37
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 7.017144450098082e-07,
"min": 6.43051701369283e-07,
"max": 0.002660218292536835,
"count": 37
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 7.017144450098082e-07,
"min": 6.43051701369283e-07,
"max": 0.002660218292536835,
"count": 37
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 37
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 37
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 37
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 37
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 37
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 37
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689795572",
"python_version": "3.9.17 (main, Jul 5 2023, 20:47:11) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\User\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1689800125"
},
"total": 4552.5900939,
"count": 1,
"self": 4.528436600000532,
"children": {
"run_training.setup": {
"total": 0.12988880000000025,
"count": 1,
"self": 0.12988880000000025
},
"TrainerController.start_learning": {
"total": 4547.9317685,
"count": 1,
"self": 2.441726900019603,
"children": {
"TrainerController._reset_env": {
"total": 3.9429609999990745,
"count": 6,
"self": 3.9429609999990745
},
"TrainerController.advance": {
"total": 4541.208982099982,
"count": 53312,
"self": 2.7120177999777297,
"children": {
"env_step": {
"total": 1871.9676023000193,
"count": 53312,
"self": 1446.537583399981,
"children": {
"SubprocessEnvManager._take_step": {
"total": 423.9763628000451,
"count": 53312,
"self": 16.19581090012082,
"children": {
"TorchPolicy.evaluate": {
"total": 407.78055189992426,
"count": 105910,
"self": 407.78055189992426
}
}
},
"workers": {
"total": 1.4536560999931547,
"count": 53311,
"self": 0.0,
"children": {
"worker_root": {
"total": 4540.639477399994,
"count": 53311,
"is_parallel": true,
"self": 3382.275085500032,
"children": {
"steps_from_proto": {
"total": 0.015660300000250427,
"count": 12,
"is_parallel": true,
"self": 0.003938999999730797,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.01172130000051963,
"count": 48,
"is_parallel": true,
"self": 0.01172130000051963
}
}
},
"UnityEnvironment.step": {
"total": 1158.3487315999612,
"count": 53311,
"is_parallel": true,
"self": 58.515838100003975,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 47.16620979997619,
"count": 53311,
"is_parallel": true,
"self": 47.16620979997619
},
"communicator.exchange": {
"total": 852.6608494999962,
"count": 53311,
"is_parallel": true,
"self": 852.6608494999962
},
"steps_from_proto": {
"total": 200.0058341999848,
"count": 106622,
"is_parallel": true,
"self": 41.84823450001252,
"children": {
"_process_rank_one_or_two_observation": {
"total": 158.15759969997228,
"count": 426488,
"is_parallel": true,
"self": 158.15759969997228
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2666.529361999985,
"count": 53311,
"self": 17.578268899951127,
"children": {
"process_trajectory": {
"total": 366.78948090003337,
"count": 53311,
"self": 366.0611124000332,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7283685000001441,
"count": 2,
"self": 0.7283685000001441
}
}
},
"_update_policy": {
"total": 2282.161612200001,
"count": 37,
"self": 247.92242620000297,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2034.239185999998,
"count": 1125,
"self": 2034.239185999998
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.6999996407539584e-06,
"count": 1,
"self": 2.6999996407539584e-06
},
"TrainerController._save_models": {
"total": 0.3380957999997918,
"count": 1,
"self": 0.021577899999101646,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31651790000069013,
"count": 1,
"self": 0.31651790000069013
}
}
}
}
}
}
}