{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4061872959136963,
"min": 1.4061872959136963,
"max": 1.4295545816421509,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71247.2890625,
"min": 69624.359375,
"max": 77195.78125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 78.19085173501577,
"min": 65.83823529411765,
"max": 377.0451127819549,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49573.0,
"min": 48996.0,
"max": 50147.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999479.0,
"min": 49661.0,
"max": 1999479.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999479.0,
"min": 49661.0,
"max": 1999479.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5171334743499756,
"min": -0.04375865310430527,
"max": 2.5236191749572754,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1595.862548828125,
"min": -5.776142120361328,
"max": 1876.56005859375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.840823719084075,
"min": 1.900754555608287,
"max": 4.019871230719804,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2435.0822378993034,
"min": 250.89960134029388,
"max": 2921.7493010759354,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.840823719084075,
"min": 1.900754555608287,
"max": 4.019871230719804,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2435.0822378993034,
"min": 250.89960134029388,
"max": 2921.7493010759354,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018468828306100072,
"min": 0.013932955821413393,
"max": 0.020345190421600514,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05540648491830022,
"min": 0.027865911642826785,
"max": 0.05547375514870509,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05361906393534607,
"min": 0.023916071994851032,
"max": 0.06244307017574708,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16085719180603822,
"min": 0.047832143989702064,
"max": 0.18579907914002738,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.988648670483336e-06,
"min": 3.988648670483336e-06,
"max": 0.0002952837015721,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1965946011450006e-05,
"min": 1.1965946011450006e-05,
"max": 0.0008437971187343,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10132951666666663,
"min": 0.10132951666666663,
"max": 0.1984279,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3039885499999999,
"min": 0.20780614999999994,
"max": 0.5812657,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.634288166666669e-05,
"min": 7.634288166666669e-05,
"max": 0.004921552210000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022902864500000007,
"min": 0.00022902864500000007,
"max": 0.014065158429999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693237561",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693240059"
},
"total": 2498.04658812,
"count": 1,
"self": 0.44298871199998757,
"children": {
"run_training.setup": {
"total": 0.06837155300001996,
"count": 1,
"self": 0.06837155300001996
},
"TrainerController.start_learning": {
"total": 2497.535227855,
"count": 1,
"self": 4.622787743042409,
"children": {
"TrainerController._reset_env": {
"total": 4.802724553999951,
"count": 1,
"self": 4.802724553999951
},
"TrainerController.advance": {
"total": 2487.9798390909573,
"count": 234072,
"self": 4.5290856361471015,
"children": {
"env_step": {
"total": 1916.9203293758985,
"count": 234072,
"self": 1620.756613649698,
"children": {
"SubprocessEnvManager._take_step": {
"total": 293.1565230581539,
"count": 234072,
"self": 16.741555282252307,
"children": {
"TorchPolicy.evaluate": {
"total": 276.4149677759016,
"count": 223074,
"self": 276.4149677759016
}
}
},
"workers": {
"total": 3.0071926680466277,
"count": 234072,
"self": 0.0,
"children": {
"worker_root": {
"total": 2489.866237155055,
"count": 234072,
"is_parallel": true,
"self": 1158.243639627998,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001110208000000057,
"count": 1,
"is_parallel": true,
"self": 0.0003587609999158303,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007514470000842266,
"count": 2,
"is_parallel": true,
"self": 0.0007514470000842266
}
}
},
"UnityEnvironment.step": {
"total": 0.030092806000084238,
"count": 1,
"is_parallel": true,
"self": 0.00035138400016876403,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022434100003465574,
"count": 1,
"is_parallel": true,
"self": 0.00022434100003465574
},
"communicator.exchange": {
"total": 0.02877533499997753,
"count": 1,
"is_parallel": true,
"self": 0.02877533499997753
},
"steps_from_proto": {
"total": 0.0007417459999032872,
"count": 1,
"is_parallel": true,
"self": 0.0002146089998404932,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000527137000062794,
"count": 2,
"is_parallel": true,
"self": 0.000527137000062794
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1331.622597527057,
"count": 234071,
"is_parallel": true,
"self": 41.79038222000122,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.88881224396164,
"count": 234071,
"is_parallel": true,
"self": 83.88881224396164
},
"communicator.exchange": {
"total": 1104.3382973929852,
"count": 234071,
"is_parallel": true,
"self": 1104.3382973929852
},
"steps_from_proto": {
"total": 101.60510567010897,
"count": 234071,
"is_parallel": true,
"self": 36.194780673167315,
"children": {
"_process_rank_one_or_two_observation": {
"total": 65.41032499694165,
"count": 468142,
"is_parallel": true,
"self": 65.41032499694165
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 566.5304240789116,
"count": 234072,
"self": 6.6189845918803485,
"children": {
"process_trajectory": {
"total": 147.27707624203015,
"count": 234072,
"self": 145.8209675890297,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4561086530004559,
"count": 10,
"self": 1.4561086530004559
}
}
},
"_update_policy": {
"total": 412.63436324500105,
"count": 97,
"self": 351.8714479570074,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.76291528799368,
"count": 2910,
"self": 60.76291528799368
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.359000179945724e-06,
"count": 1,
"self": 1.359000179945724e-06
},
"TrainerController._save_models": {
"total": 0.12987510800030577,
"count": 1,
"self": 0.001854118000210292,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12802099000009548,
"count": 1,
"self": 0.12802099000009548
}
}
}
}
}
}
}