{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.147136688232422,
"min": 3.047994375228882,
"max": 3.295745849609375,
"count": 621
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 27694.802734375,
"min": 15971.748046875,
"max": 160773.265625,
"count": 621
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 956.0,
"min": 412.57142857142856,
"max": 999.0,
"count": 621
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19120.0,
"min": 14576.0,
"max": 25272.0,
"count": 621
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1207.3728502053852,
"min": 1197.745733216471,
"max": 1218.3622745937141,
"count": 384
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4829.491400821541,
"min": 2395.491466432942,
"max": 16847.808579292,
"count": 384
},
"SoccerTwos.Step.mean": {
"value": 6209213.0,
"min": 9652.0,
"max": 6209213.0,
"count": 621
},
"SoccerTwos.Step.sum": {
"value": 6209213.0,
"min": 9652.0,
"max": 6209213.0,
"count": 621
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.01980649121105671,
"min": -0.10010386258363724,
"max": 0.01263430342078209,
"count": 621
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.1980649083852768,
"min": -1.3013502359390259,
"max": 0.16424594819545746,
"count": 621
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.019652599468827248,
"min": -0.10012737661600113,
"max": 0.01516697183251381,
"count": 621
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.19652599096298218,
"min": -1.301655888557434,
"max": 0.19717063009738922,
"count": 621
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 621
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 621
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.09120000004768372,
"min": -0.5394461521735558,
"max": 0.319759996732076,
"count": 621
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -0.9120000004768372,
"min": -8.321600019931793,
"max": 4.79639995098114,
"count": 621
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.09120000004768372,
"min": -0.5394461521735558,
"max": 0.319759996732076,
"count": 621
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -0.9120000004768372,
"min": -8.321600019931793,
"max": 4.79639995098114,
"count": 621
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 621
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 621
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014330222292725618,
"min": 0.010222057045029941,
"max": 0.023527015330425154,
"count": 289
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014330222292725618,
"min": 0.010222057045029941,
"max": 0.023527015330425154,
"count": 289
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0029650022857822477,
"min": 1.0000124414470217e-07,
"max": 0.005467161916506787,
"count": 289
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0029650022857822477,
"min": 1.0000124414470217e-07,
"max": 0.005467161916506787,
"count": 289
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0029783171989644567,
"min": 1.0298553796417309e-07,
"max": 0.005529568235700329,
"count": 289
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0029783171989644567,
"min": 1.0298553796417309e-07,
"max": 0.005529568235700329,
"count": 289
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 289
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 289
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 289
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 289
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 289
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 289
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1719422820",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\riywa\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1719435667"
},
"total": 12847.146602499997,
"count": 1,
"self": 10.015134099987336,
"children": {
"run_training.setup": {
"total": 0.09335929999360815,
"count": 1,
"self": 0.09335929999360815
},
"TrainerController.start_learning": {
"total": 12837.038109100016,
"count": 1,
"self": 7.793591305642622,
"children": {
"TrainerController._reset_env": {
"total": 8.266323699936038,
"count": 31,
"self": 8.266323699936038
},
"TrainerController.advance": {
"total": 12820.86721929445,
"count": 404275,
"self": 8.218091678572819,
"children": {
"env_step": {
"total": 5668.175079494191,
"count": 404275,
"self": 4293.395965773903,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1369.7889026088815,
"count": 404275,
"self": 44.91610289394157,
"children": {
"TorchPolicy.evaluate": {
"total": 1324.87279971494,
"count": 803164,
"self": 1324.87279971494
}
}
},
"workers": {
"total": 4.990211111406097,
"count": 404274,
"self": 0.0,
"children": {
"worker_root": {
"total": 12821.396695613977,
"count": 404274,
"is_parallel": true,
"self": 9497.811211017019,
"children": {
"steps_from_proto": {
"total": 0.04330099999788217,
"count": 62,
"is_parallel": true,
"self": 0.009100599534576759,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.034200400463305414,
"count": 248,
"is_parallel": true,
"self": 0.034200400463305414
}
}
},
"UnityEnvironment.step": {
"total": 3323.542183596961,
"count": 404274,
"is_parallel": true,
"self": 167.53556519476115,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 145.13486739588552,
"count": 404274,
"is_parallel": true,
"self": 145.13486739588552
},
"communicator.exchange": {
"total": 2455.388107987208,
"count": 404274,
"is_parallel": true,
"self": 2455.388107987208
},
"steps_from_proto": {
"total": 555.4836430191062,
"count": 808548,
"is_parallel": true,
"self": 115.02406482619699,
"children": {
"_process_rank_one_or_two_observation": {
"total": 440.45957819290925,
"count": 3234192,
"is_parallel": true,
"self": 440.45957819290925
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 7144.474048121687,
"count": 404274,
"self": 61.25754881638568,
"children": {
"process_trajectory": {
"total": 813.1317693050951,
"count": 404274,
"self": 811.842035805108,
"children": {
"RLTrainer._checkpoint": {
"total": 1.289733499987051,
"count": 12,
"self": 1.289733499987051
}
}
},
"_update_policy": {
"total": 6270.084730000206,
"count": 289,
"self": 722.7870975014812,
"children": {
"TorchPOCAOptimizer.update": {
"total": 5547.297632498725,
"count": 8670,
"self": 5547.297632498725
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.999769877642393e-07,
"count": 1,
"self": 7.999769877642393e-07
},
"TrainerController._save_models": {
"total": 0.11097400001017377,
"count": 1,
"self": 0.006390900001861155,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10458310000831261,
"count": 1,
"self": 0.10458310000831261
}
}
}
}
}
}
}