{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5028831362724304,
"min": 0.4934605360031128,
"max": 1.4368268251419067,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15014.078125,
"min": 14693.28125,
"max": 43587.578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989930.0,
"min": 29952.0,
"max": 989930.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989930.0,
"min": 29952.0,
"max": 989930.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5835267901420593,
"min": -0.1807340532541275,
"max": 0.6015130877494812,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 164.55455017089844,
"min": -42.83396911621094,
"max": 165.4160919189453,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.027452293783426285,
"min": -0.027452293783426285,
"max": 0.33393594622612,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -7.741547107696533,
"min": -7.741547107696533,
"max": 79.142822265625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06803573890741137,
"min": 0.06363366088512395,
"max": 0.0737662756883543,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9525003447037592,
"min": 0.506086928184357,
"max": 1.0327278596369602,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015568697703359854,
"min": 0.00010064681685805673,
"max": 0.016595749503938436,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21796176784703797,
"min": 0.0013084086191547375,
"max": 0.2323404930551381,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.40264038962857e-06,
"min": 7.40264038962857e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010363696545479999,
"min": 0.00010363696545479999,
"max": 0.0035071004309666,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246751428571431,
"min": 0.10246751428571431,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4345452000000003,
"min": 1.3691136000000002,
"max": 2.569033400000001,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025650467714285715,
"min": 0.00025650467714285715,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00359106548,
"min": 0.00359106548,
"max": 0.11692643666000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009652777574956417,
"min": 0.009652777574956417,
"max": 0.4172501266002655,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1351388841867447,
"min": 0.1351388841867447,
"max": 2.920750856399536,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 333.01063829787233,
"min": 310.03225806451616,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31303.0,
"min": 15984.0,
"max": 32440.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.624066651989055,
"min": -1.0000000521540642,
"max": 1.6370543264176534,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 151.0381986349821,
"min": -32.000001668930054,
"max": 151.0381986349821,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.624066651989055,
"min": -1.0000000521540642,
"max": 1.6370543264176534,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 151.0381986349821,
"min": -32.000001668930054,
"max": 151.0381986349821,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03392111031921901,
"min": 0.03242105406819172,
"max": 8.880287981592119,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.154663259687368,
"min": 3.0151580283418298,
"max": 142.0846077054739,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1715168772",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1715170401"
},
"total": 1629.72557081,
"count": 1,
"self": 0.32248322599957646,
"children": {
"run_training.setup": {
"total": 0.052455744000099,
"count": 1,
"self": 0.052455744000099
},
"TrainerController.start_learning": {
"total": 1629.3506318400002,
"count": 1,
"self": 1.3465455260197814,
"children": {
"TrainerController._reset_env": {
"total": 2.036261887000137,
"count": 1,
"self": 2.036261887000137
},
"TrainerController.advance": {
"total": 1625.8862360679805,
"count": 63737,
"self": 1.2771937789618732,
"children": {
"env_step": {
"total": 1060.855880641004,
"count": 63737,
"self": 940.6056499590359,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.43753719197548,
"count": 63737,
"self": 4.302538014959055,
"children": {
"TorchPolicy.evaluate": {
"total": 115.13499917701643,
"count": 62560,
"self": 115.13499917701643
}
}
},
"workers": {
"total": 0.8126934899926255,
"count": 63737,
"self": 0.0,
"children": {
"worker_root": {
"total": 1627.2214600270554,
"count": 63737,
"is_parallel": true,
"self": 784.5912084411284,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021605249999083753,
"count": 1,
"is_parallel": true,
"self": 0.0006672379997780808,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014932870001302945,
"count": 8,
"is_parallel": true,
"self": 0.0014932870001302945
}
}
},
"UnityEnvironment.step": {
"total": 0.03509324799983915,
"count": 1,
"is_parallel": true,
"self": 0.0004235129999869969,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00029668100000890263,
"count": 1,
"is_parallel": true,
"self": 0.00029668100000890263
},
"communicator.exchange": {
"total": 0.03326184100001228,
"count": 1,
"is_parallel": true,
"self": 0.03326184100001228
},
"steps_from_proto": {
"total": 0.0011112129998309683,
"count": 1,
"is_parallel": true,
"self": 0.0002450449994739756,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008661680003569927,
"count": 8,
"is_parallel": true,
"self": 0.0008661680003569927
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 842.630251585927,
"count": 63736,
"is_parallel": true,
"self": 21.563971725002148,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.94617676195935,
"count": 63736,
"is_parallel": true,
"self": 13.94617676195935
},
"communicator.exchange": {
"total": 745.3669176319861,
"count": 63736,
"is_parallel": true,
"self": 745.3669176319861
},
"steps_from_proto": {
"total": 61.7531854669794,
"count": 63736,
"is_parallel": true,
"self": 13.419754748951163,
"children": {
"_process_rank_one_or_two_observation": {
"total": 48.33343071802824,
"count": 509888,
"is_parallel": true,
"self": 48.33343071802824
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 563.7531616480146,
"count": 63737,
"self": 2.4172667940306383,
"children": {
"process_trajectory": {
"total": 114.50943362598332,
"count": 63737,
"self": 114.3222039859836,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18722963999971398,
"count": 2,
"self": 0.18722963999971398
}
}
},
"_update_policy": {
"total": 446.82646122800065,
"count": 442,
"self": 259.99159265501135,
"children": {
"TorchPPOOptimizer.update": {
"total": 186.8348685729893,
"count": 22803,
"self": 186.8348685729893
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0250000741507392e-06,
"count": 1,
"self": 1.0250000741507392e-06
},
"TrainerController._save_models": {
"total": 0.08158733399977791,
"count": 1,
"self": 0.0013520319998860941,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08023530199989182,
"count": 1,
"self": 0.08023530199989182
}
}
}
}
}
}
}