{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4056576490402222,
"min": 1.4056576490402222,
"max": 1.4267321825027466,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70077.65625,
"min": 69155.4375,
"max": 76046.453125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 84.59075342465754,
"min": 73.45833333333333,
"max": 403.1290322580645,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49401.0,
"min": 49027.0,
"max": 49988.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999995.0,
"min": 49508.0,
"max": 1999995.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999995.0,
"min": 49508.0,
"max": 1999995.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4493749141693115,
"min": 0.17230083048343658,
"max": 2.508056640625,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1430.4349365234375,
"min": 21.193002700805664,
"max": 1634.14404296875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.743330927131927,
"min": 1.8287744250723985,
"max": 3.9486608097776887,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2186.1052614450455,
"min": 224.93925428390503,
"max": 2568.0098422169685,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.743330927131927,
"min": 1.8287744250723985,
"max": 3.9486608097776887,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2186.1052614450455,
"min": 224.93925428390503,
"max": 2568.0098422169685,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01920094059928993,
"min": 0.013892927007206407,
"max": 0.02036442214254445,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05760282179786979,
"min": 0.027995130844647068,
"max": 0.05760282179786979,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04732358761959606,
"min": 0.021606675380220014,
"max": 0.06191406939178705,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14197076285878818,
"min": 0.04321335076044003,
"max": 0.1830188805858294,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.623598792166669e-06,
"min": 3.623598792166669e-06,
"max": 0.00029531032656322505,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0870796376500008e-05,
"min": 1.0870796376500008e-05,
"max": 0.0008439790686736499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10120783333333337,
"min": 0.10120783333333337,
"max": 0.19843677500000007,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3036235000000001,
"min": 0.20754615000000004,
"max": 0.5813263499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.027088333333336e-05,
"min": 7.027088333333336e-05,
"max": 0.004921995072500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021081265000000009,
"min": 0.00021081265000000009,
"max": 0.014068184865000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675890149",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675892473"
},
"total": 2323.370545422,
"count": 1,
"self": 0.8227431289997185,
"children": {
"run_training.setup": {
"total": 0.10943830900009743,
"count": 1,
"self": 0.10943830900009743
},
"TrainerController.start_learning": {
"total": 2322.438363984,
"count": 1,
"self": 4.018427537974276,
"children": {
"TrainerController._reset_env": {
"total": 10.169145153000045,
"count": 1,
"self": 10.169145153000045
},
"TrainerController.advance": {
"total": 2308.085576688025,
"count": 233176,
"self": 4.298021614023128,
"children": {
"env_step": {
"total": 1797.1946406369182,
"count": 233176,
"self": 1500.9078244710556,
"children": {
"SubprocessEnvManager._take_step": {
"total": 293.57182109887344,
"count": 233176,
"self": 14.97781965986428,
"children": {
"TorchPolicy.evaluate": {
"total": 278.59400143900916,
"count": 222988,
"self": 70.59276565209348,
"children": {
"TorchPolicy.sample_actions": {
"total": 208.0012357869157,
"count": 222988,
"self": 208.0012357869157
}
}
}
}
},
"workers": {
"total": 2.714995066989104,
"count": 233176,
"self": 0.0,
"children": {
"worker_root": {
"total": 2314.3301972841,
"count": 233176,
"is_parallel": true,
"self": 1093.9337691282053,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021674079998774687,
"count": 1,
"is_parallel": true,
"self": 0.00033318999999210064,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001834217999885368,
"count": 2,
"is_parallel": true,
"self": 0.001834217999885368
}
}
},
"UnityEnvironment.step": {
"total": 0.02980574599996544,
"count": 1,
"is_parallel": true,
"self": 0.00031375799994748377,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019524999993336678,
"count": 1,
"is_parallel": true,
"self": 0.00019524999993336678
},
"communicator.exchange": {
"total": 0.02857033300006151,
"count": 1,
"is_parallel": true,
"self": 0.02857033300006151
},
"steps_from_proto": {
"total": 0.0007264050000230782,
"count": 1,
"is_parallel": true,
"self": 0.0002582250001523789,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046817999987069925,
"count": 2,
"is_parallel": true,
"self": 0.00046817999987069925
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1220.3964281558947,
"count": 233175,
"is_parallel": true,
"self": 38.390595390608496,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.44713608608299,
"count": 233175,
"is_parallel": true,
"self": 76.44713608608299
},
"communicator.exchange": {
"total": 1015.6605691031441,
"count": 233175,
"is_parallel": true,
"self": 1015.6605691031441
},
"steps_from_proto": {
"total": 89.8981275760591,
"count": 233175,
"is_parallel": true,
"self": 36.446448905888246,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.45167867017085,
"count": 466350,
"is_parallel": true,
"self": 53.45167867017085
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 506.59291443708366,
"count": 233176,
"self": 6.357791014082068,
"children": {
"process_trajectory": {
"total": 162.31611195600317,
"count": 233176,
"self": 160.9084063640032,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4077055919999566,
"count": 10,
"self": 1.4077055919999566
}
}
},
"_update_policy": {
"total": 337.9190114669984,
"count": 97,
"self": 281.4391620929939,
"children": {
"TorchPPOOptimizer.update": {
"total": 56.479849374004516,
"count": 2910,
"self": 56.479849374004516
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4760003068658989e-06,
"count": 1,
"self": 1.4760003068658989e-06
},
"TrainerController._save_models": {
"total": 0.16521312900022167,
"count": 1,
"self": 0.002793858000131877,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1624192710000898,
"count": 1,
"self": 0.1624192710000898
}
}
}
}
}
}
}