{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1842167377471924,
"min": 3.172684907913208,
"max": 3.249568462371826,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 35459.4375,
"min": 13283.650390625,
"max": 117639.375,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 688.0,
"min": 341.46153846153845,
"max": 999.0,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 22016.0,
"min": 4124.0,
"max": 31968.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1226.0866013136713,
"min": 1202.3735835901539,
"max": 1226.0866013136713,
"count": 46
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 7356.519607882027,
"min": 2404.7471671803078,
"max": 22042.38445482544,
"count": 46
},
"SoccerTwos.Step.mean": {
"value": 1999130.0,
"min": 1509307.0,
"max": 1999130.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 1999130.0,
"min": 1509307.0,
"max": 1999130.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.0016364417970180511,
"min": -0.004181301221251488,
"max": 0.008756794035434723,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.02618306875228882,
"min": -0.0664612427353859,
"max": 0.157622292637825,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0002816212363541126,
"min": -0.0045310077257454395,
"max": 0.01224028505384922,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.004505939781665802,
"min": -0.07003718614578247,
"max": 0.17136399447917938,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.03152499347925186,
"min": -0.42857142857142855,
"max": 0.45002222061157227,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -0.5043998956680298,
"min": -6.0,
"max": 8.1003999710083,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.03152499347925186,
"min": -0.42857142857142855,
"max": 0.45002222061157227,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -0.5043998956680298,
"min": -6.0,
"max": 8.1003999710083,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017224992097665866,
"min": 0.013165214379445057,
"max": 0.021765073745821914,
"count": 22
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017224992097665866,
"min": 0.013165214379445057,
"max": 0.021765073745821914,
"count": 22
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0023239689956729612,
"min": 0.0005247537120643149,
"max": 0.006923159894843896,
"count": 22
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0023239689956729612,
"min": 0.0005247537120643149,
"max": 0.006923159894843896,
"count": 22
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.002343843465981384,
"min": 0.0005249925727791075,
"max": 0.007085357047617436,
"count": 22
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.002343843465981384,
"min": 0.0005249925727791075,
"max": 0.007085357047617436,
"count": 22
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0005000000000000001,
"min": 0.0005,
"max": 0.0005000000000000001,
"count": 22
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0005000000000000001,
"min": 0.0005,
"max": 0.0005000000000000001,
"count": 22
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 22
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 22
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 22
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 22
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690838276",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume --torch-device=cpu",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690840210"
},
"total": 1933.9327643760007,
"count": 1,
"self": 0.4902367780014174,
"children": {
"run_training.setup": {
"total": 0.060607506000451394,
"count": 1,
"self": 0.060607506000451394
},
"TrainerController.start_learning": {
"total": 1933.3819200919988,
"count": 1,
"self": 1.3161826378791375,
"children": {
"TrainerController._reset_env": {
"total": 2.0475889189992813,
"count": 4,
"self": 2.0475889189992813
},
"TrainerController.advance": {
"total": 1929.7939666371203,
"count": 32520,
"self": 1.3229786641668397,
"children": {
"env_step": {
"total": 1111.4364407127541,
"count": 32520,
"self": 933.357528601251,
"children": {
"SubprocessEnvManager._take_step": {
"total": 177.2854299587716,
"count": 32520,
"self": 8.542072947703673,
"children": {
"TorchPolicy.evaluate": {
"total": 168.74335701106793,
"count": 64494,
"self": 168.74335701106793
}
}
},
"workers": {
"total": 0.7934821527314853,
"count": 32520,
"self": 0.0,
"children": {
"worker_root": {
"total": 1922.9118322439936,
"count": 32520,
"is_parallel": true,
"self": 1147.965107933036,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006658627000433626,
"count": 2,
"is_parallel": true,
"self": 0.001168770000731456,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0054898569997021696,
"count": 8,
"is_parallel": true,
"self": 0.0054898569997021696
}
}
},
"UnityEnvironment.step": {
"total": 0.08002122800007783,
"count": 1,
"is_parallel": true,
"self": 0.0013706099998671561,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0009310750001532142,
"count": 1,
"is_parallel": true,
"self": 0.0009310750001532142
},
"communicator.exchange": {
"total": 0.07334866499968484,
"count": 1,
"is_parallel": true,
"self": 0.07334866499968484
},
"steps_from_proto": {
"total": 0.004370878000372613,
"count": 2,
"is_parallel": true,
"self": 0.0007474649992218474,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003623413001150766,
"count": 8,
"is_parallel": true,
"self": 0.003623413001150766
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.010676853999029845,
"count": 6,
"is_parallel": true,
"self": 0.0018945339979836717,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.008782320001046173,
"count": 24,
"is_parallel": true,
"self": 0.008782320001046173
}
}
},
"UnityEnvironment.step": {
"total": 774.9360474569585,
"count": 32519,
"is_parallel": true,
"self": 46.491318513307306,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.494726218955293,
"count": 32519,
"is_parallel": true,
"self": 23.494726218955293
},
"communicator.exchange": {
"total": 556.1550727349577,
"count": 32519,
"is_parallel": true,
"self": 556.1550727349577
},
"steps_from_proto": {
"total": 148.7949299897382,
"count": 65038,
"is_parallel": true,
"self": 25.85352793439506,
"children": {
"_process_rank_one_or_two_observation": {
"total": 122.94140205534313,
"count": 260152,
"is_parallel": true,
"self": 122.94140205534313
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 817.0345472601994,
"count": 32520,
"self": 10.883385765314415,
"children": {
"process_trajectory": {
"total": 124.86339521088667,
"count": 32520,
"self": 124.61784183288728,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2455533779993857,
"count": 1,
"self": 0.2455533779993857
}
}
},
"_update_policy": {
"total": 681.2877662839983,
"count": 22,
"self": 123.35581107800499,
"children": {
"TorchPOCAOptimizer.update": {
"total": 557.9319552059933,
"count": 672,
"self": 557.9319552059933
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1310003174003214e-06,
"count": 1,
"self": 1.1310003174003214e-06
},
"TrainerController._save_models": {
"total": 0.22418076699977973,
"count": 1,
"self": 0.0019517669988999842,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22222900000087975,
"count": 1,
"self": 0.22222900000087975
}
}
}
}
}
}
}