{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2956435680389404,
"min": 3.2956435680389404,
"max": 3.2956435680389404,
"count": 1
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 105460.59375,
"min": 105460.59375,
"max": 105460.59375,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 689.7,
"min": 689.7,
"max": 689.7,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 27588.0,
"min": 27588.0,
"max": 27588.0,
"count": 1
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1197.5733196942704,
"min": 1197.5733196942704,
"max": 1197.5733196942704,
"count": 1
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 11975.733196942703,
"min": 11975.733196942703,
"max": 11975.733196942703,
"count": 1
},
"SoccerTwos.Step.mean": {
"value": 9814.0,
"min": 9814.0,
"max": 9814.0,
"count": 1
},
"SoccerTwos.Step.sum": {
"value": 9814.0,
"min": 9814.0,
"max": 9814.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.049709100276231766,
"min": -0.049709100276231766,
"max": -0.049709100276231766,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.7953456044197083,
"min": -0.7953456044197083,
"max": -0.7953456044197083,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.049606382846832275,
"min": -0.049606382846832275,
"max": -0.049606382846832275,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.7937021255493164,
"min": -0.7937021255493164,
"max": -0.7937021255493164,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.407150000333786,
"min": -0.407150000333786,
"max": -0.407150000333786,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -6.514400005340576,
"min": -6.514400005340576,
"max": -6.514400005340576,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.407150000333786,
"min": -0.407150000333786,
"max": -0.407150000333786,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -6.514400005340576,
"min": -6.514400005340576,
"max": -6.514400005340576,
"count": 1
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713753819",
"python_version": "3.10.12 (main, Jul 5 2023, 15:34:07) [Clang 14.0.6 ]",
"command_line_arguments": "/Users/conlanrios/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2",
"numpy_version": "1.23.5",
"end_time_seconds": "1713753957"
},
"total": 138.0961150819203,
"count": 1,
"self": 0.29653055081143975,
"children": {
"run_training.setup": {
"total": 0.04165997006930411,
"count": 1,
"self": 0.04165997006930411
},
"TrainerController.start_learning": {
"total": 137.75792456103954,
"count": 1,
"self": 0.028438515961170197,
"children": {
"TrainerController._reset_env": {
"total": 6.768105186987668,
"count": 1,
"self": 6.768105186987668
},
"TrainerController.advance": {
"total": 130.73639970924705,
"count": 1157,
"self": 0.02896854595746845,
"children": {
"env_step": {
"total": 128.1499001227785,
"count": 1157,
"self": 123.84522933594417,
"children": {
"SubprocessEnvManager._take_step": {
"total": 4.286475580185652,
"count": 1157,
"self": 0.16512485139537603,
"children": {
"TorchPolicy.evaluate": {
"total": 4.121350728790276,
"count": 2308,
"self": 4.121350728790276
}
}
},
"workers": {
"total": 0.018195206648670137,
"count": 1156,
"self": 0.0,
"children": {
"worker_root": {
"total": 132.20726126793306,
"count": 1156,
"is_parallel": true,
"self": 11.972756742383353,
"children": {
"steps_from_proto": {
"total": 0.004312571021728218,
"count": 2,
"is_parallel": true,
"self": 0.0007999339140951633,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0035126371076330543,
"count": 8,
"is_parallel": true,
"self": 0.0035126371076330543
}
}
},
"UnityEnvironment.step": {
"total": 120.23019195452798,
"count": 1156,
"is_parallel": true,
"self": 0.35049448150675744,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.366147710941732,
"count": 1156,
"is_parallel": true,
"self": 2.366147710941732
},
"communicator.exchange": {
"total": 113.23947233846411,
"count": 1156,
"is_parallel": true,
"self": 113.23947233846411
},
"steps_from_proto": {
"total": 4.274077423615381,
"count": 2312,
"is_parallel": true,
"self": 0.6028466142015532,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3.671230809413828,
"count": 9248,
"is_parallel": true,
"self": 3.671230809413828
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2.557531040511094,
"count": 1156,
"self": 0.11648016411345452,
"children": {
"process_trajectory": {
"total": 2.4410508763976395,
"count": 1156,
"self": 2.4410508763976395
}
}
}
}
},
"trainer_threads": {
"total": 1.483946107327938e-06,
"count": 1,
"self": 1.483946107327938e-06
},
"TrainerController._save_models": {
"total": 0.22497966489754617,
"count": 1,
"self": 0.0014202659949660301,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22355939890258014,
"count": 1,
"self": 0.22355939890258014
}
}
}
}
}
}
}