{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3998801708221436,
"min": 1.3998801708221436,
"max": 1.4291112422943115,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70392.9765625,
"min": 68800.6953125,
"max": 78723.140625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 74.43030303030304,
"min": 72.6470588235294,
"max": 386.33076923076925,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49124.0,
"min": 49124.0,
"max": 50223.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999949.0,
"min": 49762.0,
"max": 1999949.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999949.0,
"min": 49762.0,
"max": 1999949.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.527768850326538,
"min": 0.12004044651985168,
"max": 2.5643064975738525,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1670.855224609375,
"min": 15.485218048095703,
"max": 1693.159423828125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9148361523284856,
"min": 1.80631800469502,
"max": 4.007994270946669,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2587.706696689129,
"min": 233.01502260565758,
"max": 2645.117598235607,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9148361523284856,
"min": 1.80631800469502,
"max": 4.007994270946669,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2587.706696689129,
"min": 233.01502260565758,
"max": 2645.117598235607,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.019328354427772056,
"min": 0.012269361506575175,
"max": 0.020428447857072266,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05798506328331617,
"min": 0.02453872301315035,
"max": 0.05798506328331617,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05938288639816972,
"min": 0.02319566908602913,
"max": 0.06822556373145845,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17814865919450917,
"min": 0.04639133817205826,
"max": 0.20467669119437537,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7606987464666704e-06,
"min": 3.7606987464666704e-06,
"max": 0.0002953107015630999,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.128209623940001e-05,
"min": 1.128209623940001e-05,
"max": 0.0008442861185713,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10125353333333333,
"min": 0.10125353333333333,
"max": 0.19843689999999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3037606,
"min": 0.20764795000000003,
"max": 0.5814287,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.255131333333338e-05,
"min": 7.255131333333338e-05,
"max": 0.004922001309999998,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021765394000000012,
"min": 0.00021765394000000012,
"max": 0.014073292129999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670350382",
"python_version": "3.8.15 (default, Oct 12 2022, 19:14:39) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670352762"
},
"total": 2379.8551047090004,
"count": 1,
"self": 0.4250304330007566,
"children": {
"run_training.setup": {
"total": 0.10552289399993242,
"count": 1,
"self": 0.10552289399993242
},
"TrainerController.start_learning": {
"total": 2379.3245513819998,
"count": 1,
"self": 4.561719604969312,
"children": {
"TrainerController._reset_env": {
"total": 6.188520511999968,
"count": 1,
"self": 6.188520511999968
},
"TrainerController.advance": {
"total": 2368.4501910470303,
"count": 233428,
"self": 4.5644643907999125,
"children": {
"env_step": {
"total": 1881.6521717481373,
"count": 233428,
"self": 1570.2923314681254,
"children": {
"SubprocessEnvManager._take_step": {
"total": 308.49863622104397,
"count": 233428,
"self": 15.846288048039128,
"children": {
"TorchPolicy.evaluate": {
"total": 292.65234817300484,
"count": 222972,
"self": 72.63852419996408,
"children": {
"TorchPolicy.sample_actions": {
"total": 220.01382397304076,
"count": 222972,
"self": 220.01382397304076
}
}
}
}
},
"workers": {
"total": 2.861204058967928,
"count": 233428,
"self": 0.0,
"children": {
"worker_root": {
"total": 2370.1607106180268,
"count": 233428,
"is_parallel": true,
"self": 1085.0058566318962,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008461579999448077,
"count": 1,
"is_parallel": true,
"self": 0.000305168000068079,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005409899998767287,
"count": 2,
"is_parallel": true,
"self": 0.0005409899998767287
}
}
},
"UnityEnvironment.step": {
"total": 0.029092112999933306,
"count": 1,
"is_parallel": true,
"self": 0.0002660400000422669,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021278299993809924,
"count": 1,
"is_parallel": true,
"self": 0.00021278299993809924
},
"communicator.exchange": {
"total": 0.027608283999938976,
"count": 1,
"is_parallel": true,
"self": 0.027608283999938976
},
"steps_from_proto": {
"total": 0.0010050060000139638,
"count": 1,
"is_parallel": true,
"self": 0.00042839699995056435,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005766090000633994,
"count": 2,
"is_parallel": true,
"self": 0.0005766090000633994
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1285.1548539861305,
"count": 233427,
"is_parallel": true,
"self": 35.940918034214064,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.39069884202343,
"count": 233427,
"is_parallel": true,
"self": 82.39069884202343
},
"communicator.exchange": {
"total": 1067.6337369109988,
"count": 233427,
"is_parallel": true,
"self": 1067.6337369109988
},
"steps_from_proto": {
"total": 99.18950019889428,
"count": 233427,
"is_parallel": true,
"self": 43.19649679206202,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.99300340683226,
"count": 466854,
"is_parallel": true,
"self": 55.99300340683226
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 482.2335549080931,
"count": 233428,
"self": 6.5034912930142355,
"children": {
"process_trajectory": {
"total": 163.98163764507842,
"count": 233428,
"self": 163.4806729410783,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5009647040001255,
"count": 4,
"self": 0.5009647040001255
}
}
},
"_update_policy": {
"total": 311.7484259700004,
"count": 97,
"self": 257.14053787600824,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.60788809399219,
"count": 2910,
"self": 54.60788809399219
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.247000000148546e-06,
"count": 1,
"self": 1.247000000148546e-06
},
"TrainerController._save_models": {
"total": 0.1241189709999162,
"count": 1,
"self": 0.0026506839999456133,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12146828699997059,
"count": 1,
"self": 0.12146828699997059
}
}
}
}
}
}
}