{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.412666916847229,
"min": 1.412666916847229,
"max": 1.4301310777664185,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70510.4453125,
"min": 69283.484375,
"max": 76415.109375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.36269430051813,
"min": 80.99341021416804,
"max": 390.0387596899225,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49425.0,
"min": 49080.0,
"max": 50315.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999505.0,
"min": 49791.0,
"max": 1999505.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999505.0,
"min": 49791.0,
"max": 1999505.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4650397300720215,
"min": 0.1154681146144867,
"max": 2.482987403869629,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1427.258056640625,
"min": 14.779918670654297,
"max": 1501.5838623046875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7303974045784773,
"min": 1.8203032775782049,
"max": 3.9419448622721154,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2159.9000972509384,
"min": 232.99881953001022,
"max": 2349.3991379141808,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7303974045784773,
"min": 1.8203032775782049,
"max": 3.9419448622721154,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2159.9000972509384,
"min": 232.99881953001022,
"max": 2349.3991379141808,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016701989337020173,
"min": 0.011924134413129651,
"max": 0.02008317175883955,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.050105968011060514,
"min": 0.023848268826259303,
"max": 0.05698549668110597,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.059477083012461664,
"min": 0.02169910380616784,
"max": 0.06200636294153002,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.178431249037385,
"min": 0.04339820761233568,
"max": 0.18601908882459006,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.684048772016669e-06,
"min": 3.684048772016669e-06,
"max": 0.00029530792656402497,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1052146316050007e-05,
"min": 1.1052146316050007e-05,
"max": 0.0008438358187214,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10122798333333334,
"min": 0.10122798333333334,
"max": 0.19843597499999996,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30368395000000004,
"min": 0.20758815000000008,
"max": 0.5812786000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.127636833333338e-05,
"min": 7.127636833333338e-05,
"max": 0.0049219551525,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021382910500000014,
"min": 0.00021382910500000014,
"max": 0.014065802139999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685694303",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685696839"
},
"total": 2536.0043505050003,
"count": 1,
"self": 0.4350634990000799,
"children": {
"run_training.setup": {
"total": 0.06585350400001744,
"count": 1,
"self": 0.06585350400001744
},
"TrainerController.start_learning": {
"total": 2535.503433502,
"count": 1,
"self": 4.836466025968548,
"children": {
"TrainerController._reset_env": {
"total": 3.898415491999998,
"count": 1,
"self": 3.898415491999998
},
"TrainerController.advance": {
"total": 2526.6362959970315,
"count": 232452,
"self": 4.844880391923198,
"children": {
"env_step": {
"total": 1972.2957822740377,
"count": 232452,
"self": 1666.078679811059,
"children": {
"SubprocessEnvManager._take_step": {
"total": 303.0875268239952,
"count": 232452,
"self": 18.147715367007038,
"children": {
"TorchPolicy.evaluate": {
"total": 284.93981145698814,
"count": 222940,
"self": 284.93981145698814
}
}
},
"workers": {
"total": 3.1295756389837095,
"count": 232452,
"self": 0.0,
"children": {
"worker_root": {
"total": 2527.124611010941,
"count": 232452,
"is_parallel": true,
"self": 1168.6859308429562,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009486259999960112,
"count": 1,
"is_parallel": true,
"self": 0.00026472299998658855,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006839030000094226,
"count": 2,
"is_parallel": true,
"self": 0.0006839030000094226
}
}
},
"UnityEnvironment.step": {
"total": 0.029360693999990417,
"count": 1,
"is_parallel": true,
"self": 0.0003275520000158849,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00024957299999073257,
"count": 1,
"is_parallel": true,
"self": 0.00024957299999073257
},
"communicator.exchange": {
"total": 0.02803429799999435,
"count": 1,
"is_parallel": true,
"self": 0.02803429799999435
},
"steps_from_proto": {
"total": 0.0007492709999894487,
"count": 1,
"is_parallel": true,
"self": 0.00021246899999027846,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005368019999991702,
"count": 2,
"is_parallel": true,
"self": 0.0005368019999991702
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1358.4386801679848,
"count": 232451,
"is_parallel": true,
"self": 41.60023598294811,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.41278488000242,
"count": 232451,
"is_parallel": true,
"self": 80.41278488000242
},
"communicator.exchange": {
"total": 1136.7598045059144,
"count": 232451,
"is_parallel": true,
"self": 1136.7598045059144
},
"steps_from_proto": {
"total": 99.6658547991199,
"count": 232451,
"is_parallel": true,
"self": 36.026718154115684,
"children": {
"_process_rank_one_or_two_observation": {
"total": 63.639136645004214,
"count": 464902,
"is_parallel": true,
"self": 63.639136645004214
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 549.4956333310706,
"count": 232452,
"self": 7.772014093123175,
"children": {
"process_trajectory": {
"total": 141.62922935794688,
"count": 232452,
"self": 140.1421350539469,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4870943039999815,
"count": 10,
"self": 1.4870943039999815
}
}
},
"_update_policy": {
"total": 400.09438988000056,
"count": 97,
"self": 337.5155229179989,
"children": {
"TorchPPOOptimizer.update": {
"total": 62.578866962001655,
"count": 2910,
"self": 62.578866962001655
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.919999683916103e-07,
"count": 1,
"self": 9.919999683916103e-07
},
"TrainerController._save_models": {
"total": 0.13225499500003934,
"count": 1,
"self": 0.0022374019999915618,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13001759300004778,
"count": 1,
"self": 0.13001759300004778
}
}
}
}
}
}
}