{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1853787899017334,
"min": 3.1321542263031006,
"max": 3.295640230178833,
"count": 55
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 85724.9140625,
"min": 18404.51953125,
"max": 105460.453125,
"count": 55
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 360.8,
"min": 233.56521739130434,
"max": 999.0,
"count": 55
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 21648.0,
"min": 17612.0,
"max": 23816.0,
"count": 55
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1235.6966781131266,
"min": 1193.9849330349607,
"max": 1235.6966781131266,
"count": 53
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 34599.506987167544,
"min": 2388.421744919647,
"max": 56583.659072376024,
"count": 53
},
"SoccerTwos.Step.mean": {
"value": 549998.0,
"min": 9924.0,
"max": 549998.0,
"count": 55
},
"SoccerTwos.Step.sum": {
"value": 549998.0,
"min": 9924.0,
"max": 549998.0,
"count": 55
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.016695523634552956,
"min": -0.009290765970945358,
"max": 0.07515978068113327,
"count": 55
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.5008656978607178,
"min": -0.3065952658653259,
"max": 1.0521931648254395,
"count": 55
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.01842910423874855,
"min": -0.009097704663872719,
"max": 0.07520028203725815,
"count": 55
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.5528731346130371,
"min": -0.300224244594574,
"max": 1.1129671335220337,
"count": 55
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 55
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 55
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.5275066653887431,
"min": -0.6299000016103188,
"max": 0.5275066653887431,
"count": 55
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 15.825199961662292,
"min": -15.117600038647652,
"max": 15.825199961662292,
"count": 55
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.5275066653887431,
"min": -0.6299000016103188,
"max": 0.5275066653887431,
"count": 55
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 15.825199961662292,
"min": -15.117600038647652,
"max": 15.825199961662292,
"count": 55
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 55
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 55
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019107229733435815,
"min": 0.011625102110459314,
"max": 0.023824517099031557,
"count": 25
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019107229733435815,
"min": 0.011625102110459314,
"max": 0.023824517099031557,
"count": 25
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.019781758605192105,
"min": 0.0018005522550083696,
"max": 0.019781758605192105,
"count": 25
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.019781758605192105,
"min": 0.0018005522550083696,
"max": 0.019781758605192105,
"count": 25
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.02071026973426342,
"min": 0.0018095454666763543,
"max": 0.02071026973426342,
"count": 25
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.02071026973426342,
"min": 0.0018095454666763543,
"max": 0.02071026973426342,
"count": 25
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 25
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 25
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 25
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 25
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 25
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 25
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690226164",
"python_version": "3.9.17 (main, Jul 5 2023, 20:47:11) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\yojit\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1690228555"
},
"total": 2391.4622449000003,
"count": 1,
"self": 0.3056692000000112,
"children": {
"run_training.setup": {
"total": 0.17043860000000022,
"count": 1,
"self": 0.17043860000000022
},
"TrainerController.start_learning": {
"total": 2390.9861371,
"count": 1,
"self": 1.1522503999890432,
"children": {
"TrainerController._reset_env": {
"total": 4.933811299999984,
"count": 3,
"self": 4.933811299999984
},
"TrainerController.advance": {
"total": 2384.663659600011,
"count": 36000,
"self": 1.3310645000224213,
"children": {
"env_step": {
"total": 921.8023372999942,
"count": 36000,
"self": 715.4007014999893,
"children": {
"SubprocessEnvManager._take_step": {
"total": 205.67426330000313,
"count": 36000,
"self": 7.900247900007798,
"children": {
"TorchPolicy.evaluate": {
"total": 197.77401539999533,
"count": 71210,
"self": 197.77401539999533
}
}
},
"workers": {
"total": 0.727372500001751,
"count": 36000,
"self": 0.0,
"children": {
"worker_root": {
"total": 2349.111099200014,
"count": 36000,
"is_parallel": true,
"self": 1774.1064614000106,
"children": {
"steps_from_proto": {
"total": 0.006796600000422259,
"count": 6,
"is_parallel": true,
"self": 0.00136760000067504,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005428999999747219,
"count": 24,
"is_parallel": true,
"self": 0.005428999999747219
}
}
},
"UnityEnvironment.step": {
"total": 574.997841200003,
"count": 36000,
"is_parallel": true,
"self": 32.59274699997536,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.573396800002243,
"count": 36000,
"is_parallel": true,
"self": 24.573396800002243
},
"communicator.exchange": {
"total": 411.39709200000976,
"count": 36000,
"is_parallel": true,
"self": 411.39709200000976
},
"steps_from_proto": {
"total": 106.43460540001567,
"count": 72000,
"is_parallel": true,
"self": 21.500849300048472,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.9337560999672,
"count": 288000,
"is_parallel": true,
"self": 84.9337560999672
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1461.5302577999944,
"count": 36000,
"self": 8.366181699980416,
"children": {
"process_trajectory": {
"total": 194.8513073000147,
"count": 36000,
"self": 194.18469420001452,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6666131000001769,
"count": 1,
"self": 0.6666131000001769
}
}
},
"_update_policy": {
"total": 1258.3127687999993,
"count": 26,
"self": 116.67866579999577,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1141.6341030000035,
"count": 770,
"self": 1141.6341030000035
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.200000264769187e-06,
"count": 1,
"self": 3.200000264769187e-06
},
"TrainerController._save_models": {
"total": 0.23641259999976683,
"count": 1,
"self": 0.01481499999999869,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22159759999976814,
"count": 1,
"self": 0.22159759999976814
}
}
}
}
}
}
}