{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5123621225357056,
"min": 0.5123621225357056,
"max": 1.4478181600570679,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15379.0615234375,
"min": 15379.0615234375,
"max": 43921.01171875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989879.0,
"min": 29921.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989879.0,
"min": 29921.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4854337275028229,
"min": -0.0997934639453888,
"max": 0.4942700266838074,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 133.97970581054688,
"min": -23.95043182373047,
"max": 137.90133666992188,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03163745254278183,
"min": -0.08903004229068756,
"max": 0.3003970682621002,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 8.731937408447266,
"min": -22.168479919433594,
"max": 71.49449920654297,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0700229547875804,
"min": 0.06514275648471533,
"max": 0.07275777092700833,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.050344321813706,
"min": 0.5820621674160666,
"max": 1.0727315721160267,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014101934525468903,
"min": 0.00022611479645474091,
"max": 0.017249060494623717,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21152901788203354,
"min": 0.002939492353911632,
"max": 0.24148684692473205,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.541037486353338e-06,
"min": 7.541037486353338e-06,
"max": 0.00029485035171655,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011311556229530007,
"min": 0.00011311556229530007,
"max": 0.0036091578969474,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251364666666668,
"min": 0.10251364666666668,
"max": 0.19828345,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5377047000000001,
"min": 1.4780487999999996,
"max": 2.56906,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002611133020000002,
"min": 0.0002611133020000002,
"max": 0.009828516655000001,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003916699530000003,
"min": 0.003916699530000003,
"max": 0.12031495474000004,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008687824010848999,
"min": 0.008687824010848999,
"max": 0.3848576545715332,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13031736016273499,
"min": 0.12434162199497223,
"max": 3.0788612365722656,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 363.27710843373495,
"min": 363.27710843373495,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30152.0,
"min": 17231.0,
"max": 32129.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4920915482812618,
"min": -0.9999375534243882,
"max": 1.5864698508058686,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 123.84359850734472,
"min": -31.99800170958042,
"max": 131.6769976168871,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4920915482812618,
"min": -0.9999375534243882,
"max": 1.5864698508058686,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 123.84359850734472,
"min": -31.99800170958042,
"max": 131.6769976168871,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03295600993722868,
"min": 0.03295600993722868,
"max": 7.681839644908905,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.7353488247899804,
"min": 2.553997143317247,
"max": 138.2731136083603,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710083089",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1710086284"
},
"total": 3195.01157031,
"count": 1,
"self": 0.6300733689995468,
"children": {
"run_training.setup": {
"total": 0.06851419999998143,
"count": 1,
"self": 0.06851419999998143
},
"TrainerController.start_learning": {
"total": 3194.3129827410003,
"count": 1,
"self": 2.2751331170302365,
"children": {
"TrainerController._reset_env": {
"total": 2.65920932899985,
"count": 1,
"self": 2.65920932899985
},
"TrainerController.advance": {
"total": 3189.2779985089705,
"count": 63692,
"self": 2.511178440976437,
"children": {
"env_step": {
"total": 2086.8873754649294,
"count": 63692,
"self": 1919.480604162808,
"children": {
"SubprocessEnvManager._take_step": {
"total": 165.94684915208813,
"count": 63692,
"self": 7.073208622110087,
"children": {
"TorchPolicy.evaluate": {
"total": 158.87364052997805,
"count": 62550,
"self": 158.87364052997805
}
}
},
"workers": {
"total": 1.4599221500332078,
"count": 63692,
"self": 0.0,
"children": {
"worker_root": {
"total": 3187.9435255730295,
"count": 63692,
"is_parallel": true,
"self": 1456.6612255279651,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002442283000164025,
"count": 1,
"is_parallel": true,
"self": 0.0007911520001471217,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016511310000169033,
"count": 8,
"is_parallel": true,
"self": 0.0016511310000169033
}
}
},
"UnityEnvironment.step": {
"total": 0.06408854899996186,
"count": 1,
"is_parallel": true,
"self": 0.0007580870001220319,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00034717799985628517,
"count": 1,
"is_parallel": true,
"self": 0.00034717799985628517
},
"communicator.exchange": {
"total": 0.06098799699998381,
"count": 1,
"is_parallel": true,
"self": 0.06098799699998381
},
"steps_from_proto": {
"total": 0.0019952869999997347,
"count": 1,
"is_parallel": true,
"self": 0.0004401020000841527,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001555184999915582,
"count": 8,
"is_parallel": true,
"self": 0.001555184999915582
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1731.2823000450644,
"count": 63691,
"is_parallel": true,
"self": 51.14428572413635,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.494588453021834,
"count": 63691,
"is_parallel": true,
"self": 27.494588453021834
},
"communicator.exchange": {
"total": 1519.7950921569534,
"count": 63691,
"is_parallel": true,
"self": 1519.7950921569534
},
"steps_from_proto": {
"total": 132.84833371095283,
"count": 63691,
"is_parallel": true,
"self": 28.326643442669365,
"children": {
"_process_rank_one_or_two_observation": {
"total": 104.52169026828346,
"count": 509528,
"is_parallel": true,
"self": 104.52169026828346
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1099.8794446030647,
"count": 63692,
"self": 4.565522936073876,
"children": {
"process_trajectory": {
"total": 167.3749685549883,
"count": 63692,
"self": 167.08918786598815,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2857806890001484,
"count": 2,
"self": 0.2857806890001484
}
}
},
"_update_policy": {
"total": 927.9389531120025,
"count": 455,
"self": 366.82047433003936,
"children": {
"TorchPPOOptimizer.update": {
"total": 561.1184787819632,
"count": 22767,
"self": 561.1184787819632
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0009998732130043e-06,
"count": 1,
"self": 1.0009998732130043e-06
},
"TrainerController._save_models": {
"total": 0.10064078499999596,
"count": 1,
"self": 0.002187648000472109,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09845313699952385,
"count": 1,
"self": 0.09845313699952385
}
}
}
}
}
}
}