{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4201643466949463,
"min": 1.3760290145874023,
"max": 3.29569673538208,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 27539.826171875,
"min": 17013.634765625,
"max": 108446.1484375,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 76.06060606060606,
"min": 44.554545454545455,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20080.0,
"min": 11908.0,
"max": 30344.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1559.4187701124695,
"min": 1182.2200895003532,
"max": 1669.8784838732513,
"count": 4939
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 205843.27765484597,
"min": 2364.4401790007064,
"max": 341754.7593396053,
"count": 4939
},
"SoccerTwos.Step.mean": {
"value": 49999952.0,
"min": 9188.0,
"max": 49999952.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999952.0,
"min": 9188.0,
"max": 49999952.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.01004683319479227,
"min": -0.1283930093050003,
"max": 0.1464795172214508,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.336228847503662,
"min": -22.826171875,
"max": 24.555360794067383,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.009106192737817764,
"min": -0.129206120967865,
"max": 0.14550182223320007,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.2111235857009888,
"min": -22.42377471923828,
"max": 25.301570892333984,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.12639699304910532,
"min": -0.6666666666666666,
"max": 0.5480761868613107,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -16.810800075531006,
"min": -64.7603999376297,
"max": 59.623199701309204,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.12639699304910532,
"min": -0.6666666666666666,
"max": 0.5480761868613107,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -16.810800075531006,
"min": -64.7603999376297,
"max": 59.623199701309204,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018519588883888598,
"min": 0.009680798851574461,
"max": 0.025912521505961194,
"count": 2420
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018519588883888598,
"min": 0.009680798851574461,
"max": 0.025912521505961194,
"count": 2420
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0942157323161761,
"min": 6.27514643269933e-07,
"max": 0.11612870022654534,
"count": 2420
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0942157323161761,
"min": 6.27514643269933e-07,
"max": 0.11612870022654534,
"count": 2420
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09627160876989364,
"min": 5.592775825865222e-07,
"max": 0.11845562209685644,
"count": 2420
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09627160876989364,
"min": 5.592775825865222e-07,
"max": 0.11845562209685644,
"count": 2420
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2420
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2420
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2420
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2420
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2420
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2420
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1729338261",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\yongkseo\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1729419756"
},
"total": 81493.7608779,
"count": 1,
"self": 0.3364776999951573,
"children": {
"run_training.setup": {
"total": 0.13590059999842197,
"count": 1,
"self": 0.13590059999842197
},
"TrainerController.start_learning": {
"total": 81493.2884996,
"count": 1,
"self": 53.013638499207445,
"children": {
"TrainerController._reset_env": {
"total": 6.9478357000116375,
"count": 250,
"self": 6.9478357000116375
},
"TrainerController.advance": {
"total": 81433.24586920078,
"count": 3416586,
"self": 52.43847630273376,
"children": {
"env_step": {
"total": 37983.07697930693,
"count": 3416586,
"self": 28740.3626186048,
"children": {
"SubprocessEnvManager._take_step": {
"total": 9211.062967196362,
"count": 3416586,
"self": 318.6551877049278,
"children": {
"TorchPolicy.evaluate": {
"total": 8892.407779491434,
"count": 6287466,
"self": 8892.407779491434
}
}
},
"workers": {
"total": 31.651393505766464,
"count": 3416586,
"self": 0.0,
"children": {
"worker_root": {
"total": 81421.48727009684,
"count": 3416586,
"is_parallel": true,
"self": 58375.67622640871,
"children": {
"steps_from_proto": {
"total": 0.27842489996692166,
"count": 500,
"is_parallel": true,
"self": 0.05472250020829961,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.22370239975862205,
"count": 2000,
"is_parallel": true,
"self": 0.22370239975862205
}
}
},
"UnityEnvironment.step": {
"total": 23045.532618788166,
"count": 3416586,
"is_parallel": true,
"self": 1146.8519696675576,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 839.3518982041423,
"count": 3416586,
"is_parallel": true,
"self": 839.3518982041423
},
"communicator.exchange": {
"total": 17501.82970619885,
"count": 3416586,
"is_parallel": true,
"self": 17501.82970619885
},
"steps_from_proto": {
"total": 3557.499044717617,
"count": 6833172,
"is_parallel": true,
"self": 698.6394697922588,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2858.859574925358,
"count": 27332688,
"is_parallel": true,
"self": 2858.859574925358
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 43397.73041359111,
"count": 3416586,
"self": 375.08932699069555,
"children": {
"process_trajectory": {
"total": 9295.895894200694,
"count": 3416586,
"self": 9287.601464900763,
"children": {
"RLTrainer._checkpoint": {
"total": 8.294429299930925,
"count": 100,
"self": 8.294429299930925
}
}
},
"_update_policy": {
"total": 33726.74519239972,
"count": 2420,
"self": 5000.413113001428,
"children": {
"TorchPOCAOptimizer.update": {
"total": 28726.332079398293,
"count": 72600,
"self": 28726.332079398293
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.00004568696022e-07,
"count": 1,
"self": 6.00004568696022e-07
},
"TrainerController._save_models": {
"total": 0.08115559999714606,
"count": 1,
"self": 0.0027892999933101237,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07836630000383593,
"count": 1,
"self": 0.07836630000383593
}
}
}
}
}
}
}