poca-SoccerTwos / run_logs / timers.json
Statos6's picture
Retrained to 10M steps with a higher play_against_latest_model_ratio (0.75 instead of 0.25); this helped the model learn to play defense better
f5c5d35 verified
raw
history blame
20.3 kB
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4048619270324707,
"min": 1.3750499486923218,
"max": 1.7051565647125244,
"count": 335
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 28277.060546875,
"min": 14701.9814453125,
"max": 34322.671875,
"count": 335
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 45.06422018348624,
"min": 35.125,
"max": 53.40425531914894,
"count": 335
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19648.0,
"min": 8072.0,
"max": 20960.0,
"count": 335
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1607.9046472284606,
"min": 1555.6832652825399,
"max": 1613.038356436412,
"count": 335
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 350523.2130958044,
"min": 163654.2453609866,
"max": 434134.2547516843,
"count": 335
},
"SoccerTwos.Step.mean": {
"value": 9999982.0,
"min": 6659988.0,
"max": 9999982.0,
"count": 335
},
"SoccerTwos.Step.sum": {
"value": 9999982.0,
"min": 6659988.0,
"max": 9999982.0,
"count": 335
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.03223046660423279,
"min": -0.11106479167938232,
"max": 0.10332842171192169,
"count": 335
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 13.246722221374512,
"min": -46.31401824951172,
"max": 44.017906188964844,
"count": 335
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.031953852623701096,
"min": -0.11250180751085281,
"max": 0.10477326810359955,
"count": 335
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 13.133033752441406,
"min": -46.91325378417969,
"max": 44.6334114074707,
"count": 335
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 335
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 335
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.09531192604554903,
"min": -0.381275439994377,
"max": 0.26663347656634745,
"count": 335
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 20.777999877929688,
"min": -86.93080031871796,
"max": 62.125600039958954,
"count": 335
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.09531192604554903,
"min": -0.381275439994377,
"max": 0.26663347656634745,
"count": 335
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 20.777999877929688,
"min": -86.93080031871796,
"max": 62.125600039958954,
"count": 335
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 335
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 335
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01046212162455049,
"min": 0.009879295097471185,
"max": 0.014559235423075734,
"count": 51
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01046212162455049,
"min": 0.009879295097471185,
"max": 0.014559235423075734,
"count": 51
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.05891079842112958,
"min": 0.04971672408282757,
"max": 0.05918900854885578,
"count": 51
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.05891079842112958,
"min": 0.04971672408282757,
"max": 0.05918900854885578,
"count": 51
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.05924153439700604,
"min": 0.051102403877303006,
"max": 0.05954511137679219,
"count": 51
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.05924153439700604,
"min": 0.051102403877303006,
"max": 0.05954511137679219,
"count": 51
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 6.4449295954992225e-09,
"min": 6.4449295954992225e-09,
"max": 0.00014248644558948728,
"count": 51
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 6.4449295954992225e-09,
"min": 6.4449295954992225e-09,
"max": 0.00014248644558948728,
"count": 51
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.10000085146159124,
"min": 0.10000085146159124,
"max": 0.11912103967704953,
"count": 51
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.10000085146159124,
"min": 0.10000085146159124,
"max": 0.11912103967704953,
"count": 51
},
"SoccerTwos.Policy.Beta.mean": {
"value": 1.0002659404254946e-05,
"min": 1.0002659404254946e-05,
"max": 6.972151274949521e-05,
"count": 51
},
"SoccerTwos.Policy.Beta.sum": {
"value": 1.0002659404254946e-05,
"min": 1.0002659404254946e-05,
"max": 6.972151274949521e-05,
"count": 51
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710343829",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/stephan/.conda/envs/rl/bin/mlagents-learn ./ml-agents/config/poca/SoccerTwos.yaml --env=./ml-agents/training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos-read-deal --width=1280 --height=720 --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1710353568"
},
"total": 9739.254863360999,
"count": 1,
"self": 0.270132729987381,
"children": {
"run_training.setup": {
"total": 0.018506341002648696,
"count": 1,
"self": 0.018506341002648696
},
"TrainerController.start_learning": {
"total": 9738.966224290009,
"count": 1,
"self": 4.652608350283117,
"children": {
"TrainerController._reset_env": {
"total": 4.244304390012985,
"count": 7,
"self": 4.244304390012985
},
"TrainerController.advance": {
"total": 9729.947026144713,
"count": 239073,
"self": 4.148107524248189,
"children": {
"env_step": {
"total": 4354.3673745146225,
"count": 239073,
"self": 3721.0493522615725,
"children": {
"SubprocessEnvManager._take_step": {
"total": 630.4299217515072,
"count": 239073,
"self": 19.02124206413282,
"children": {
"TorchPolicy.evaluate": {
"total": 611.4086796873744,
"count": 418182,
"self": 611.4086796873744
}
}
},
"workers": {
"total": 2.8881005015427945,
"count": 239073,
"self": 0.0,
"children": {
"worker_root": {
"total": 9731.739510110943,
"count": 239073,
"is_parallel": true,
"self": 6533.360267171403,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.06389564499841072,
"count": 2,
"is_parallel": true,
"self": 0.000593470991589129,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.06330217400682159,
"count": 8,
"is_parallel": true,
"self": 0.06330217400682159
}
}
},
"UnityEnvironment.step": {
"total": 0.02612613000383135,
"count": 1,
"is_parallel": true,
"self": 0.000600070008658804,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00040655999328009784,
"count": 1,
"is_parallel": true,
"self": 0.00040655999328009784
},
"communicator.exchange": {
"total": 0.02343449799809605,
"count": 1,
"is_parallel": true,
"self": 0.02343449799809605
},
"steps_from_proto": {
"total": 0.0016850020037963986,
"count": 2,
"is_parallel": true,
"self": 0.0003057300054933876,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001379271998303011,
"count": 8,
"is_parallel": true,
"self": 0.001379271998303011
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.012151043978519738,
"count": 12,
"is_parallel": true,
"self": 0.002059131977148354,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.010091912001371384,
"count": 48,
"is_parallel": true,
"self": 0.010091912001371384
}
}
},
"UnityEnvironment.step": {
"total": 3198.3670918955613,
"count": 239072,
"is_parallel": true,
"self": 175.16570653802773,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 110.43290661888022,
"count": 239072,
"is_parallel": true,
"self": 110.43290661888022
},
"communicator.exchange": {
"total": 2470.8126502879313,
"count": 239072,
"is_parallel": true,
"self": 2470.8126502879313
},
"steps_from_proto": {
"total": 441.95582845072204,
"count": 478144,
"is_parallel": true,
"self": 78.3687699053844,
"children": {
"_process_rank_one_or_two_observation": {
"total": 363.58705854533764,
"count": 1912576,
"is_parallel": true,
"self": 363.58705854533764
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5371.4315441058425,
"count": 239073,
"self": 26.83088273082103,
"children": {
"process_trajectory": {
"total": 1094.2377518599678,
"count": 239073,
"self": 1093.3974272879568,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8403245720110135,
"count": 7,
"self": 0.8403245720110135
}
}
},
"_update_policy": {
"total": 4250.362909515054,
"count": 51,
"self": 652.032682771809,
"children": {
"TorchPOCAOptimizer.update": {
"total": 3598.3302267432446,
"count": 4080,
"self": 3598.3302267432446
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.200011845678091e-07,
"count": 1,
"self": 9.200011845678091e-07
},
"TrainerController._save_models": {
"total": 0.12228448499809019,
"count": 1,
"self": 0.0026952329935738817,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11958925200451631,
"count": 1,
"self": 0.11958925200451631
}
}
}
}
}
}
}