poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2167279720306396,
"min": 3.1973202228546143,
"max": 3.2956995964050293,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 102935.296875,
"min": 37185.703125,
"max": 130729.421875,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 558.375,
"min": 355.2307692307692,
"max": 999.0,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 17868.0,
"min": 15068.0,
"max": 25260.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1194.187218738631,
"min": 1194.187218738631,
"max": 1203.4016242819084,
"count": 39
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2388.374437477262,
"min": 2388.374437477262,
"max": 23929.033746886005,
"count": 39
},
"SoccerTwos.Step.mean": {
"value": 499552.0,
"min": 9648.0,
"max": 499552.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 499552.0,
"min": 9648.0,
"max": 499552.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.00390473660081625,
"min": -0.08126641064882278,
"max": 0.014773346483707428,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.06638052314519882,
"min": -1.537827968597412,
"max": 0.2500327229499817,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0042207203805446625,
"min": -0.07779227942228317,
"max": 0.01406838372349739,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.07175225019454956,
"min": -1.4383354187011719,
"max": 0.23916251957416534,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.4201750010251999,
"max": 0.3598666747411092,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -7.921999998390675,
"max": 5.398000121116638,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.4201750010251999,
"max": 0.3598666747411092,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -7.921999998390675,
"max": 5.398000121116638,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015917055184642474,
"min": 0.011932552664075047,
"max": 0.02313203196778583,
"count": 23
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015917055184642474,
"min": 0.011932552664075047,
"max": 0.02313203196778583,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0008109367045108229,
"min": 3.57876278940239e-05,
"max": 0.005951197103907665,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0008109367045108229,
"min": 3.57876278940239e-05,
"max": 0.005951197103907665,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0008106520489188066,
"min": 3.531726955164534e-05,
"max": 0.005981516093015671,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0008106520489188066,
"min": 3.531726955164534e-05,
"max": 0.005981516093015671,
"count": 23
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1719070914",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/diego/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1719071442"
},
"total": 528.704971621999,
"count": 1,
"self": 0.21809306699651643,
"children": {
"run_training.setup": {
"total": 0.030361275999894133,
"count": 1,
"self": 0.030361275999894133
},
"TrainerController.start_learning": {
"total": 528.4565172790026,
"count": 1,
"self": 0.5282690684616682,
"children": {
"TrainerController._reset_env": {
"total": 2.1051965660008136,
"count": 13,
"self": 2.1051965660008136
},
"TrainerController.advance": {
"total": 525.6977479285451,
"count": 37164,
"self": 0.5636299799298286,
"children": {
"env_step": {
"total": 423.05774586966436,
"count": 37164,
"self": 309.7807341303487,
"children": {
"SubprocessEnvManager._take_step": {
"total": 112.92941606832755,
"count": 37164,
"self": 3.1370184365041496,
"children": {
"TorchPolicy.evaluate": {
"total": 109.7923976318234,
"count": 73750,
"self": 109.7923976318234
}
}
},
"workers": {
"total": 0.34759567098808475,
"count": 37164,
"self": 0.0,
"children": {
"worker_root": {
"total": 527.4745943693088,
"count": 37164,
"is_parallel": true,
"self": 281.41875222134695,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0014076890001888387,
"count": 2,
"is_parallel": true,
"self": 0.0003525610009091906,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001055127999279648,
"count": 8,
"is_parallel": true,
"self": 0.001055127999279648
}
}
},
"UnityEnvironment.step": {
"total": 0.015537252998910844,
"count": 1,
"is_parallel": true,
"self": 0.0003910280029231217,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002788439996948,
"count": 1,
"is_parallel": true,
"self": 0.0002788439996948
},
"communicator.exchange": {
"total": 0.013840812996932073,
"count": 1,
"is_parallel": true,
"self": 0.013840812996932073
},
"steps_from_proto": {
"total": 0.00102656799936085,
"count": 2,
"is_parallel": true,
"self": 0.0002212470026279334,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008053209967329167,
"count": 8,
"is_parallel": true,
"self": 0.0008053209967329167
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 246.04431799796657,
"count": 37163,
"is_parallel": true,
"self": 13.048685674148146,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.000386016799894,
"count": 37163,
"is_parallel": true,
"self": 9.000386016799894
},
"communicator.exchange": {
"total": 187.2834473853436,
"count": 37163,
"is_parallel": true,
"self": 187.2834473853436
},
"steps_from_proto": {
"total": 36.71179892167493,
"count": 74326,
"is_parallel": true,
"self": 7.200647920806659,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.511151000868267,
"count": 297304,
"is_parallel": true,
"self": 29.511151000868267
}
}
}
}
},
"steps_from_proto": {
"total": 0.011524149995238986,
"count": 24,
"is_parallel": true,
"self": 0.002306710015545832,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.009217439979693154,
"count": 96,
"is_parallel": true,
"self": 0.009217439979693154
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 102.07637207895095,
"count": 37164,
"self": 4.291560851252143,
"children": {
"process_trajectory": {
"total": 26.002775756704068,
"count": 37164,
"self": 25.9022926027028,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1004831540012674,
"count": 1,
"self": 0.1004831540012674
}
}
},
"_update_policy": {
"total": 71.78203547099474,
"count": 23,
"self": 38.797311159993114,
"children": {
"TorchPOCAOptimizer.update": {
"total": 32.98472431100163,
"count": 690,
"self": 32.98472431100163
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.839967004954815e-07,
"count": 1,
"self": 4.839967004954815e-07
},
"TrainerController._save_models": {
"total": 0.12530323199825943,
"count": 1,
"self": 0.001199418995383894,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12410381300287554,
"count": 1,
"self": 0.12410381300287554
}
}
}
}
}
}
}