{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6196073293685913,
"min": 1.6196073293685913,
"max": 1.6196073293685913,
"count": 1
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 10831.93359375,
"min": 10831.93359375,
"max": 10831.93359375,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 52.925925925925924,
"min": 52.925925925925924,
"max": 52.925925925925924,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 5716.0,
"min": 5716.0,
"max": 5716.0,
"count": 1
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1521.016516083494,
"min": 1521.016516083494,
"max": 1521.016516083494,
"count": 1
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 82134.89186850868,
"min": 82134.89186850868,
"max": 82134.89186850868,
"count": 1
},
"SoccerTwos.Step.mean": {
"value": 7544889.0,
"min": 7544889.0,
"max": 7544889.0,
"count": 1
},
"SoccerTwos.Step.sum": {
"value": 7544889.0,
"min": 7544889.0,
"max": 7544889.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.028078727424144745,
"min": -0.028078727424144745,
"max": -0.028078727424144745,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.4881725311279297,
"min": -1.4881725311279297,
"max": -1.4881725311279297,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.030530137941241264,
"min": -0.030530137941241264,
"max": -0.030530137941241264,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.6180973052978516,
"min": -1.6180973052978516,
"max": -1.6180973052978516,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.11261131853427526,
"min": -0.11261131853427526,
"max": -0.11261131853427526,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -5.968399882316589,
"min": -5.968399882316589,
"max": -5.968399882316589,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.11261131853427526,
"min": -0.11261131853427526,
"max": -0.11261131853427526,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -5.968399882316589,
"min": -5.968399882316589,
"max": -5.968399882316589,
"count": 1
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694309186",
"python_version": "3.9.17 (main, Jul 5 2023, 20:47:11) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\F:\\ProgramData\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1694309204"
},
"total": 18.141477599999998,
"count": 1,
"self": 0.08926389999999884,
"children": {
"run_training.setup": {
"total": 0.1585276999999996,
"count": 1,
"self": 0.1585276999999996
},
"TrainerController.start_learning": {
"total": 17.893686,
"count": 1,
"self": 0.017375300000011862,
"children": {
"TrainerController._reset_env": {
"total": 5.037988500000001,
"count": 2,
"self": 5.037988500000001
},
"TrainerController.advance": {
"total": 12.528668399999981,
"count": 377,
"self": 0.014363899999960239,
"children": {
"env_step": {
"total": 9.479446400000006,
"count": 377,
"self": 7.023474400000003,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2.448307199999986,
"count": 377,
"self": 0.08237930000001548,
"children": {
"TorchPolicy.evaluate": {
"total": 2.3659278999999707,
"count": 686,
"self": 2.3659278999999707
}
}
},
"workers": {
"total": 0.0076648000000165695,
"count": 377,
"self": 0.0,
"children": {
"worker_root": {
"total": 12.690685499999999,
"count": 377,
"is_parallel": true,
"self": 7.292277399999977,
"children": {
"steps_from_proto": {
"total": 0.005625900000000961,
"count": 4,
"is_parallel": true,
"self": 0.0012017000000019706,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00442419999999899,
"count": 16,
"is_parallel": true,
"self": 0.00442419999999899
}
}
},
"UnityEnvironment.step": {
"total": 5.392782200000021,
"count": 377,
"is_parallel": true,
"self": 0.2729520000000365,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.2915378000000004,
"count": 377,
"is_parallel": true,
"self": 0.2915378000000004
},
"communicator.exchange": {
"total": 3.9169518000000068,
"count": 377,
"is_parallel": true,
"self": 3.9169518000000068
},
"steps_from_proto": {
"total": 0.9113405999999777,
"count": 754,
"is_parallel": true,
"self": 0.2103195999999503,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.7010210000000274,
"count": 3016,
"is_parallel": true,
"self": 0.7010210000000274
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3.034858100000015,
"count": 377,
"self": 0.07185500000004552,
"children": {
"process_trajectory": {
"total": 2.9630030999999697,
"count": 377,
"self": 2.9630030999999697
}
}
}
}
},
"trainer_threads": {
"total": 1.900000000887303e-06,
"count": 1,
"self": 1.900000000887303e-06
},
"TrainerController._save_models": {
"total": 0.3096519000000022,
"count": 1,
"self": 0.0176764000000027,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2919754999999995,
"count": 1,
"self": 0.2919754999999995
}
}
}
}
}
}
}