{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.464221477508545,
"min": 1.2736166715621948,
"max": 3.295741081237793,
"count": 3272
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 30830.6484375,
"min": 13012.1484375,
"max": 111580.859375,
"count": 3272
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 72.6,
"min": 38.556451612903224,
"max": 999.0,
"count": 3272
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20328.0,
"min": 10496.0,
"max": 30916.0,
"count": 3272
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1605.226769204585,
"min": 1193.3543124428843,
"max": 1695.1346730382043,
"count": 3267
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 224731.7476886419,
"min": 2386.7086248857686,
"max": 414252.2353853305,
"count": 3267
},
"SoccerTwos.Step.mean": {
"value": 32719910.0,
"min": 9474.0,
"max": 32719910.0,
"count": 3272
},
"SoccerTwos.Step.sum": {
"value": 32719910.0,
"min": 9474.0,
"max": 32719910.0,
"count": 3272
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.023001251742243767,
"min": -0.14658641815185547,
"max": 0.22663359344005585,
"count": 3272
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.2431764602661133,
"min": -31.809253692626953,
"max": 29.18775177001953,
"count": 3272
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.020451392978429794,
"min": -0.14640040695667267,
"max": 0.22649861872196198,
"count": 3272
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.8836464881896973,
"min": -31.768888473510742,
"max": 28.202497482299805,
"count": 3272
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 3272
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 3272
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.0017914890397525002,
"min": -0.5555555555555556,
"max": 0.5120800018310547,
"count": 3272
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -0.25259995460510254,
"min": -71.61340010166168,
"max": 62.30999982357025,
"count": 3272
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.0017914890397525002,
"min": -0.5555555555555556,
"max": 0.5120800018310547,
"count": 3272
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -0.25259995460510254,
"min": -71.61340010166168,
"max": 62.30999982357025,
"count": 3272
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3272
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3272
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.020361812616465615,
"min": 0.008717195485951379,
"max": 0.02725926701289912,
"count": 1587
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.020361812616465615,
"min": 0.008717195485951379,
"max": 0.02725926701289912,
"count": 1587
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08448542207479477,
"min": 0.001476826563399906,
"max": 0.13258785431583722,
"count": 1587
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08448542207479477,
"min": 0.001476826563399906,
"max": 0.13258785431583722,
"count": 1587
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08545316110054652,
"min": 0.00148449419454361,
"max": 0.13661988104383152,
"count": 1587
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08545316110054652,
"min": 0.00148449419454361,
"max": 0.13661988104383152,
"count": 1587
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1587
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1587
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 1587
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 1587
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 1587
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 1587
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1712499231",
"python_version": "3.10.12 (main, Jul 5 2023, 15:02:25) [Clang 14.0.6 ]",
"command_line_arguments": "/Users/anniewong/anaconda3/envs/marl-soccer/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos_v1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2",
"numpy_version": "1.23.5",
"end_time_seconds": "1712596994"
},
"total": 92705.15982116699,
"count": 1,
"self": 0.10832149999623653,
"children": {
"run_training.setup": {
"total": 0.01164516700009699,
"count": 1,
"self": 0.01164516700009699
},
"TrainerController.start_learning": {
"total": 92705.03985449999,
"count": 1,
"self": 17.35822578954685,
"children": {
"TrainerController._reset_env": {
"total": 6.829817416899459,
"count": 164,
"self": 6.829817416899459
},
"TrainerController.advance": {
"total": 92680.70507333455,
"count": 2263567,
"self": 15.826123002640088,
"children": {
"env_step": {
"total": 73745.22544223705,
"count": 2263567,
"self": 71098.37476374,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2635.957448261717,
"count": 2263567,
"self": 78.06975966116806,
"children": {
"TorchPolicy.evaluate": {
"total": 2557.887688600549,
"count": 4110446,
"self": 2557.887688600549
}
}
},
"workers": {
"total": 10.89323023533143,
"count": 2263566,
"self": 0.0,
"children": {
"worker_root": {
"total": 92673.70175010336,
"count": 2263566,
"is_parallel": true,
"self": 23725.027372671713,
"children": {
"steps_from_proto": {
"total": 0.19879645116088795,
"count": 328,
"is_parallel": true,
"self": 0.024070366143860156,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.1747260850170278,
"count": 1312,
"is_parallel": true,
"self": 0.1747260850170278
}
}
},
"UnityEnvironment.step": {
"total": 68948.47558098048,
"count": 2263566,
"is_parallel": true,
"self": 191.88667373587668,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1258.7498449246123,
"count": 2263566,
"is_parallel": true,
"self": 1258.7498449246123
},
"communicator.exchange": {
"total": 65020.0371697729,
"count": 2263566,
"is_parallel": true,
"self": 65020.0371697729
},
"steps_from_proto": {
"total": 2477.801892547097,
"count": 4527132,
"is_parallel": true,
"self": 288.4266735231722,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2189.3752190239247,
"count": 18108528,
"is_parallel": true,
"self": 2189.3752190239247
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 18919.65350809487,
"count": 2263566,
"self": 168.29818683945268,
"children": {
"process_trajectory": {
"total": 3569.982182907468,
"count": 2263566,
"self": 3564.2477374044574,
"children": {
"RLTrainer._checkpoint": {
"total": 5.734445503010647,
"count": 65,
"self": 5.734445503010647
}
}
},
"_update_policy": {
"total": 15181.37313834795,
"count": 1587,
"self": 1664.5327628329505,
"children": {
"TorchPOCAOptimizer.update": {
"total": 13516.840375515,
"count": 47613,
"self": 13516.840375515
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.839974619448185e-07,
"count": 1,
"self": 5.839974619448185e-07
},
"TrainerController._save_models": {
"total": 0.1467373749910621,
"count": 1,
"self": 0.0004923339874949306,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14624504100356717,
"count": 1,
"self": 0.14624504100356717
}
}
}
}
}
}
}