{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.275252342224121,
"min": 2.275252342224121,
"max": 3.2956926822662354,
"count": 197
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 45213.8125,
"min": 27345.453125,
"max": 105462.1640625,
"count": 197
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 52.680851063829785,
"min": 47.47,
"max": 999.0,
"count": 197
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19808.0,
"min": 12556.0,
"max": 31968.0,
"count": 197
},
"SoccerTwos.Step.mean": {
"value": 1979958.0,
"min": 9000.0,
"max": 1979958.0,
"count": 198
},
"SoccerTwos.Step.sum": {
"value": 1979958.0,
"min": 9000.0,
"max": 1979958.0,
"count": 198
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.025730594992637634,
"min": -0.11822260171175003,
"max": 0.2088506817817688,
"count": 198
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -4.8373517990112305,
"min": -14.961146354675293,
"max": 26.315185546875,
"count": 198
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.02415480464696884,
"min": -0.12006423622369766,
"max": 0.2116696834564209,
"count": 198
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.541103363037109,
"min": -14.156135559082031,
"max": 26.670379638671875,
"count": 198
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 198
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 198
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.06313935905060869,
"min": -0.5882352941176471,
"max": 0.6639481518003676,
"count": 198
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 11.870199501514435,
"min": -59.55000019073486,
"max": 71.7064003944397,
"count": 198
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.06313935905060869,
"min": -0.5882352941176471,
"max": 0.6639481518003676,
"count": 198
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 11.870199501514435,
"min": -59.55000019073486,
"max": 71.7064003944397,
"count": 198
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 198
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 198
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1473.1825463414482,
"min": 1194.0034057009439,
"max": 1473.1825463414482,
"count": 196
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 276958.31871219224,
"min": 2388.50241698197,
"max": 303758.06167282234,
"count": 196
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016436572575670046,
"min": 0.011545663926517591,
"max": 0.023501400930884604,
"count": 94
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016436572575670046,
"min": 0.011545663926517591,
"max": 0.023501400930884604,
"count": 94
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0995179792245229,
"min": 0.0008222002851349923,
"max": 0.10937345027923584,
"count": 94
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0995179792245229,
"min": 0.0008222002851349923,
"max": 0.10937345027923584,
"count": 94
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10111721629897753,
"min": 0.0008312070118942453,
"max": 0.11332893148064613,
"count": 94
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10111721629897753,
"min": 0.0008312070118942453,
"max": 0.11332893148064613,
"count": 94
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 94
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 94
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 94
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 94
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 94
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 94
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713530296",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/home/ixu/Desktop/Projects WIP/marl_soccer/.venv/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713531536"
},
"total": 1240.4790082669351,
"count": 1,
"self": 0.04742303502280265,
"children": {
"run_training.setup": {
"total": 0.007101755938492715,
"count": 1,
"self": 0.007101755938492715
},
"TrainerController.start_learning": {
"total": 1240.4244834759738,
"count": 1,
"self": 1.1302310549654067,
"children": {
"TrainerController._reset_env": {
"total": 0.7685058229835704,
"count": 10,
"self": 0.7685058229835704
},
"TrainerController.advance": {
"total": 1238.4071374209598,
"count": 133285,
"self": 1.171209466876462,
"children": {
"env_step": {
"total": 966.6317162554478,
"count": 133285,
"self": 719.107268515043,
"children": {
"SubprocessEnvManager._take_step": {
"total": 246.7787815622287,
"count": 133285,
"self": 8.112934920587577,
"children": {
"TorchPolicy.evaluate": {
"total": 238.6658466416411,
"count": 251501,
"self": 238.6658466416411
}
}
},
"workers": {
"total": 0.7456661781761795,
"count": 133284,
"self": 0.0,
"children": {
"worker_root": {
"total": 1238.8091836764943,
"count": 133284,
"is_parallel": true,
"self": 651.9256760552526,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010363230248913169,
"count": 2,
"is_parallel": true,
"self": 0.00023955409415066242,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007967689307406545,
"count": 8,
"is_parallel": true,
"self": 0.0007967689307406545
}
}
},
"UnityEnvironment.step": {
"total": 0.013842551968991756,
"count": 1,
"is_parallel": true,
"self": 0.0002746081445366144,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001937089255079627,
"count": 1,
"is_parallel": true,
"self": 0.0001937089255079627
},
"communicator.exchange": {
"total": 0.01269432797562331,
"count": 1,
"is_parallel": true,
"self": 0.01269432797562331
},
"steps_from_proto": {
"total": 0.0006799069233238697,
"count": 2,
"is_parallel": true,
"self": 0.00013445387594401836,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005454530473798513,
"count": 8,
"is_parallel": true,
"self": 0.0005454530473798513
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 586.8769371736562,
"count": 133283,
"is_parallel": true,
"self": 32.728412658092566,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 20.463317427318543,
"count": 133283,
"is_parallel": true,
"self": 20.463317427318543
},
"communicator.exchange": {
"total": 443.9430830233032,
"count": 133283,
"is_parallel": true,
"self": 443.9430830233032
},
"steps_from_proto": {
"total": 89.74212406494189,
"count": 266566,
"is_parallel": true,
"self": 16.447025579866022,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.29509848507587,
"count": 1066264,
"is_parallel": true,
"self": 73.29509848507587
}
}
}
}
},
"steps_from_proto": {
"total": 0.00657044758554548,
"count": 18,
"is_parallel": true,
"self": 0.001229763263836503,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005340684321708977,
"count": 72,
"is_parallel": true,
"self": 0.005340684321708977
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 270.6042116986355,
"count": 133284,
"self": 10.074312842800282,
"children": {
"process_trajectory": {
"total": 111.07563559175469,
"count": 133284,
"self": 110.69107601267751,
"children": {
"RLTrainer._checkpoint": {
"total": 0.38455957907717675,
"count": 3,
"self": 0.38455957907717675
}
}
},
"_update_policy": {
"total": 149.4542632640805,
"count": 95,
"self": 96.65693989722058,
"children": {
"TorchPOCAOptimizer.update": {
"total": 52.79732336685993,
"count": 2862,
"self": 52.79732336685993
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.450557753443718e-07,
"count": 1,
"self": 4.450557753443718e-07
},
"TrainerController._save_models": {
"total": 0.11860873200930655,
"count": 1,
"self": 0.0009553009876981378,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11765343102160841,
"count": 1,
"self": 0.11765343102160841
}
}
}
}
}
}
}