poca-SoccerTwos / run_logs /timers.json
VanillaVanilla's picture
First Push
085f884 verified
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.9083620309829712,
"min": 1.8701623678207397,
"max": 3.295753002166748,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 40487.80859375,
"min": 21909.728515625,
"max": 116174.859375,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 60.3780487804878,
"min": 42.60344827586207,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19804.0,
"min": 7900.0,
"max": 26356.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1549.2479446948657,
"min": 1201.1731570362992,
"max": 1554.2867745415815,
"count": 492
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 254076.66292995797,
"min": 2403.92563261748,
"max": 342515.3310216315,
"count": 492
},
"SoccerTwos.Step.mean": {
"value": 4999864.0,
"min": 9932.0,
"max": 4999864.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999864.0,
"min": 9932.0,
"max": 4999864.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.0015505763003602624,
"min": -0.08583631366491318,
"max": 0.15597806870937347,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.2542945146560669,
"min": -15.364700317382812,
"max": 27.501192092895508,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0058425962924957275,
"min": -0.08904243260622025,
"max": 0.1600945144891739,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.9581857919692993,
"min": -15.93859577178955,
"max": 28.160961151123047,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.1581341457076189,
"min": -0.6634909110990438,
"max": 0.39891763645059924,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 25.9339998960495,
"min": -50.5407999753952,
"max": 56.88680016994476,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.1581341457076189,
"min": -0.6634909110990438,
"max": 0.39891763645059924,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 25.9339998960495,
"min": -50.5407999753952,
"max": 56.88680016994476,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01381340757458626,
"min": 0.01106493189615018,
"max": 0.024011059190767507,
"count": 240
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01381340757458626,
"min": 0.01106493189615018,
"max": 0.024011059190767507,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11282804906368256,
"min": 4.252453345543472e-05,
"max": 0.12220808764298757,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11282804906368256,
"min": 4.252453345543472e-05,
"max": 0.12220808764298757,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11486122111479441,
"min": 4.2571803108633805e-05,
"max": 0.12413785929481189,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11486122111479441,
"min": 4.2571803108633805e-05,
"max": 0.12413785929481189,
"count": 240
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1706252404",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\elsa917\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./soccerTwo.yaml --env=./SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1706264835"
},
"total": 12431.145774600001,
"count": 1,
"self": 0.335086200002479,
"children": {
"run_training.setup": {
"total": 0.11811050000005707,
"count": 1,
"self": 0.11811050000005707
},
"TrainerController.start_learning": {
"total": 12430.6925779,
"count": 1,
"self": 8.306792599501932,
"children": {
"TrainerController._reset_env": {
"total": 4.888159100000848,
"count": 25,
"self": 4.888159100000848
},
"TrainerController.advance": {
"total": 12417.367653400499,
"count": 340498,
"self": 8.01775750059278,
"children": {
"env_step": {
"total": 5968.070108200105,
"count": 340498,
"self": 4631.6375530994155,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1331.0929218002866,
"count": 340498,
"self": 42.983509300459446,
"children": {
"TorchPolicy.evaluate": {
"total": 1288.1094124998272,
"count": 631324,
"self": 1288.1094124998272
}
}
},
"workers": {
"total": 5.33963330040342,
"count": 340498,
"self": 0.0,
"children": {
"worker_root": {
"total": 12416.357360500515,
"count": 340498,
"is_parallel": true,
"self": 8743.221733501156,
"children": {
"steps_from_proto": {
"total": 0.04094869999926232,
"count": 50,
"is_parallel": true,
"self": 0.00824190000184899,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03270679999741333,
"count": 200,
"is_parallel": true,
"self": 0.03270679999741333
}
}
},
"UnityEnvironment.step": {
"total": 3673.094678299359,
"count": 340498,
"is_parallel": true,
"self": 176.58975499843245,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 142.32844469985866,
"count": 340498,
"is_parallel": true,
"self": 142.32844469985866
},
"communicator.exchange": {
"total": 2783.00880460009,
"count": 340498,
"is_parallel": true,
"self": 2783.00880460009
},
"steps_from_proto": {
"total": 571.167674000978,
"count": 680996,
"is_parallel": true,
"self": 116.51077979990987,
"children": {
"_process_rank_one_or_two_observation": {
"total": 454.6568942010681,
"count": 2723984,
"is_parallel": true,
"self": 454.6568942010681
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 6441.279787699801,
"count": 340498,
"self": 56.869217100065725,
"children": {
"process_trajectory": {
"total": 1303.297275299747,
"count": 340498,
"self": 1301.9161581997482,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3811170999988462,
"count": 10,
"self": 1.3811170999988462
}
}
},
"_update_policy": {
"total": 5081.113295299989,
"count": 240,
"self": 664.5548821000411,
"children": {
"TorchPOCAOptimizer.update": {
"total": 4416.558413199948,
"count": 7206,
"self": 4416.558413199948
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0999992809956893e-06,
"count": 1,
"self": 1.0999992809956893e-06
},
"TrainerController._save_models": {
"total": 0.12997169999835023,
"count": 1,
"self": 0.006059199999072007,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12391249999927823,
"count": 1,
"self": 0.12391249999927823
}
}
}
}
}
}
}