poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2957684993743896,
"min": 3.2957684993743896,
"max": 3.2957684993743896,
"count": 1
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 105464.59375,
"min": 105464.59375,
"max": 105464.59375,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 876.875,
"min": 876.875,
"max": 876.875,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 28060.0,
"min": 28060.0,
"max": 28060.0,
"count": 1
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1200.4971269630323,
"min": 1200.4971269630323,
"max": 1200.4971269630323,
"count": 1
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4801.988507852129,
"min": 4801.988507852129,
"max": 4801.988507852129,
"count": 1
},
"SoccerTwos.Step.mean": {
"value": 9046.0,
"min": 9046.0,
"max": 9046.0,
"count": 1
},
"SoccerTwos.Step.sum": {
"value": 9046.0,
"min": 9046.0,
"max": 9046.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.07640378922224045,
"min": -0.07640378922224045,
"max": -0.07640378922224045,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.8404417037963867,
"min": -0.8404417037963867,
"max": -0.8404417037963867,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07640746235847473,
"min": -0.07640746235847473,
"max": -0.07640746235847473,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.8404820561408997,
"min": -0.8404820561408997,
"max": -0.8404820561408997,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.0356363599950617,
"min": -0.0356363599950617,
"max": -0.0356363599950617,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -0.3919999599456787,
"min": -0.3919999599456787,
"max": -0.3919999599456787,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.0356363599950617,
"min": -0.0356363599950617,
"max": -0.0356363599950617,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -0.3919999599456787,
"min": -0.3919999599456787,
"max": -0.3919999599456787,
"count": 1
},
"SoccerTwos.IsTraining.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.IsTraining.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695672941",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\16175\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1695672965"
},
"total": 24.3498265,
"count": 1,
"self": 0.2337857000000021,
"children": {
"run_training.setup": {
"total": 0.1006904999999998,
"count": 1,
"self": 0.1006904999999998
},
"TrainerController.start_learning": {
"total": 24.015350299999998,
"count": 1,
"self": 0.021411200000034825,
"children": {
"TrainerController._reset_env": {
"total": 4.7525206,
"count": 1,
"self": 4.7525206
},
"TrainerController.advance": {
"total": 19.10917519999996,
"count": 1000,
"self": 0.021244999999861847,
"children": {
"env_step": {
"total": 16.827379000000093,
"count": 1000,
"self": 13.214282000000043,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3.5992805000000168,
"count": 1000,
"self": 0.15992610000000784,
"children": {
"TorchPolicy.evaluate": {
"total": 3.439354400000009,
"count": 2000,
"self": 3.439354400000009
}
}
},
"workers": {
"total": 0.013816500000031873,
"count": 1000,
"self": 0.0,
"children": {
"worker_root": {
"total": 17.26760290000003,
"count": 1000,
"is_parallel": true,
"self": 6.441386500000032,
"children": {
"steps_from_proto": {
"total": 0.001699799999999918,
"count": 2,
"is_parallel": true,
"self": 0.0003551999999986677,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013446000000012504,
"count": 8,
"is_parallel": true,
"self": 0.0013446000000012504
}
}
},
"UnityEnvironment.step": {
"total": 10.824516599999995,
"count": 1000,
"is_parallel": true,
"self": 0.47402580000002814,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.37056259999999774,
"count": 1000,
"is_parallel": true,
"self": 0.37056259999999774
},
"communicator.exchange": {
"total": 8.488509099999941,
"count": 1000,
"is_parallel": true,
"self": 8.488509099999941
},
"steps_from_proto": {
"total": 1.4914191000000283,
"count": 2000,
"is_parallel": true,
"self": 0.3147943000001012,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1.176624799999927,
"count": 8000,
"is_parallel": true,
"self": 1.176624799999927
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2.2605512000000045,
"count": 1000,
"self": 0.09772029999997844,
"children": {
"process_trajectory": {
"total": 2.162830900000026,
"count": 1000,
"self": 2.162830900000026
}
}
}
}
},
"trainer_threads": {
"total": 7.000000010748408e-07,
"count": 1,
"self": 7.000000010748408e-07
},
"TrainerController._save_models": {
"total": 0.13224260000000143,
"count": 1,
"self": 0.005603800000002934,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1266387999999985,
"count": 1,
"self": 0.1266387999999985
}
}
}
}
}
}
}