ppo-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.295693874359131,
"min": 3.295693874359131,
"max": 3.295693874359131,
"count": 1
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 110418.9296875,
"min": 110418.9296875,
"max": 110418.9296875,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 634.2222222222222,
"min": 634.2222222222222,
"max": 634.2222222222222,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 22832.0,
"min": 22832.0,
"max": 22832.0,
"count": 1
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1197.9710219322496,
"min": 1197.9710219322496,
"max": 1197.9710219322496,
"count": 1
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 11979.710219322496,
"min": 11979.710219322496,
"max": 11979.710219322496,
"count": 1
},
"SoccerTwos.Step.mean": {
"value": 9434.0,
"min": 9434.0,
"max": 9434.0,
"count": 1
},
"SoccerTwos.Step.sum": {
"value": 9434.0,
"min": 9434.0,
"max": 9434.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.03669814392924309,
"min": 0.03669814392924309,
"max": 0.03669814392924309,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.5871703028678894,
"min": 0.5871703028678894,
"max": 0.5871703028678894,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.03665364906191826,
"min": 0.03665364906191826,
"max": 0.03665364906191826,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.5864583849906921,
"min": 0.5864583849906921,
"max": 0.5864583849906921,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.44384999945759773,
"min": -0.44384999945759773,
"max": -0.44384999945759773,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -7.101599991321564,
"min": -7.101599991321564,
"max": -7.101599991321564,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.44384999945759773,
"min": -0.44384999945759773,
"max": -0.44384999945759773,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -7.101599991321564,
"min": -7.101599991321564,
"max": -7.101599991321564,
"count": 1
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691871220",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos/SoccerTwos --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691871273"
},
"total": 52.92985513199994,
"count": 1,
"self": 0.42652391100000386,
"children": {
"run_training.setup": {
"total": 0.04326205600000321,
"count": 1,
"self": 0.04326205600000321
},
"TrainerController.start_learning": {
"total": 52.460069164999936,
"count": 1,
"self": 0.04065984700048375,
"children": {
"TrainerController._reset_env": {
"total": 4.187897844999952,
"count": 1,
"self": 4.187897844999952
},
"TrainerController.advance": {
"total": 48.22342018799941,
"count": 1633,
"self": 0.040021946998194835,
"children": {
"env_step": {
"total": 46.09681821200422,
"count": 1633,
"self": 36.810695207003164,
"children": {
"SubprocessEnvManager._take_step": {
"total": 9.26380222399871,
"count": 1633,
"self": 0.2661621359967512,
"children": {
"TorchPolicy.evaluate": {
"total": 8.997640088001958,
"count": 3242,
"self": 8.997640088001958
}
}
},
"workers": {
"total": 0.022320781002349577,
"count": 1632,
"self": 0.0,
"children": {
"worker_root": {
"total": 52.319724798999346,
"count": 1632,
"is_parallel": true,
"self": 20.933880723998755,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006286292999902798,
"count": 2,
"is_parallel": true,
"self": 0.003791633999981059,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024946589999217395,
"count": 8,
"is_parallel": true,
"self": 0.0024946589999217395
}
}
},
"UnityEnvironment.step": {
"total": 0.048291547999951945,
"count": 1,
"is_parallel": true,
"self": 0.0012161689999174996,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008420479999813324,
"count": 1,
"is_parallel": true,
"self": 0.0008420479999813324
},
"communicator.exchange": {
"total": 0.04257049700004245,
"count": 1,
"is_parallel": true,
"self": 0.04257049700004245
},
"steps_from_proto": {
"total": 0.003662834000010662,
"count": 2,
"is_parallel": true,
"self": 0.0006274100001064653,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0030354239999041965,
"count": 8,
"is_parallel": true,
"self": 0.0030354239999041965
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 31.38584407500059,
"count": 1631,
"is_parallel": true,
"self": 1.8372215209967635,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.1792259349999767,
"count": 1631,
"is_parallel": true,
"self": 1.1792259349999767
},
"communicator.exchange": {
"total": 22.27639839699691,
"count": 1631,
"is_parallel": true,
"self": 22.27639839699691
},
"steps_from_proto": {
"total": 6.0929982220069405,
"count": 3262,
"is_parallel": true,
"self": 1.014425716002279,
"children": {
"_process_rank_one_or_two_observation": {
"total": 5.078572506004662,
"count": 13048,
"is_parallel": true,
"self": 5.078572506004662
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2.086580028996991,
"count": 1632,
"self": 0.2830637349971994,
"children": {
"process_trajectory": {
"total": 1.8035162939997917,
"count": 1632,
"self": 1.8035162939997917
}
}
}
}
},
"trainer_threads": {
"total": 1.3500000477506546e-06,
"count": 1,
"self": 1.3500000477506546e-06
},
"TrainerController._save_models": {
"total": 0.008089935000043624,
"count": 1,
"self": 4.8711999966144504e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.00804122300007748,
"count": 1,
"self": 0.00804122300007748
}
}
}
}
}
}
}