ppo-PyramidsRND / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6040499806404114,
"min": 0.6040499806404114,
"max": 1.4344218969345093,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18005.521484375,
"min": 18005.521484375,
"max": 43514.62109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989903.0,
"min": 29952.0,
"max": 989903.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989903.0,
"min": 29952.0,
"max": 989903.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.23428134620189667,
"min": -0.11501607298851013,
"max": 0.23428134620189667,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 60.210304260253906,
"min": -27.718873977661133,
"max": 60.210304260253906,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.020223131403326988,
"min": 0.016001379117369652,
"max": 0.4314071536064148,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.197344779968262,
"min": 4.000344753265381,
"max": 102.24349212646484,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0685230177685818,
"min": 0.06413871610333526,
"max": 0.07227788327561159,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9593222487601453,
"min": 0.4811417650730461,
"max": 1.0406347062249264,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010716137538717401,
"min": 8.704494213708378e-05,
"max": 0.010716137538717401,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1500259255420436,
"min": 0.001131584247782089,
"max": 0.1500259255420436,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.2348333027071426e-06,
"min": 7.2348333027071426e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001012876662379,
"min": 0.0001012876662379,
"max": 0.003507213230929,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10241157857142857,
"min": 0.10241157857142857,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4337621,
"min": 1.3691136000000002,
"max": 2.569071,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025091669928571425,
"min": 0.00025091669928571425,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00351283379,
"min": 0.00351283379,
"max": 0.11693019290000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014180599711835384,
"min": 0.01354063581675291,
"max": 0.5825592875480652,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19852839410305023,
"min": 0.19852839410305023,
"max": 4.077915191650391,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 616.2115384615385,
"min": 606.9183673469388,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32043.0,
"min": 15984.0,
"max": 33539.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.03754227493818,
"min": -1.0000000521540642,
"max": 1.03754227493818,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 53.952198296785355,
"min": -32.000001668930054,
"max": 53.952198296785355,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.03754227493818,
"min": -1.0000000521540642,
"max": 1.03754227493818,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 53.952198296785355,
"min": -32.000001668930054,
"max": 53.952198296785355,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.09058949579551924,
"min": 0.09058949579551924,
"max": 11.825374035164714,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.710653781367,
"min": 3.853030671714805,
"max": 189.20598456263542,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695763028",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695765235"
},
"total": 2206.981667421,
"count": 1,
"self": 0.957145588000003,
"children": {
"run_training.setup": {
"total": 0.0728047699999479,
"count": 1,
"self": 0.0728047699999479
},
"TrainerController.start_learning": {
"total": 2205.951717063,
"count": 1,
"self": 1.5415772660408038,
"children": {
"TrainerController._reset_env": {
"total": 4.758411872000124,
"count": 1,
"self": 4.758411872000124
},
"TrainerController.advance": {
"total": 2199.502189022959,
"count": 63291,
"self": 1.518514233967835,
"children": {
"env_step": {
"total": 1512.489073835,
"count": 63291,
"self": 1396.091206318931,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.51606335805832,
"count": 63291,
"self": 4.929427089034789,
"children": {
"TorchPolicy.evaluate": {
"total": 110.58663626902353,
"count": 62564,
"self": 110.58663626902353
}
}
},
"workers": {
"total": 0.8818041580107092,
"count": 63291,
"self": 0.0,
"children": {
"worker_root": {
"total": 2200.8154210799535,
"count": 63291,
"is_parallel": true,
"self": 923.1510119389854,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0028544130000227597,
"count": 1,
"is_parallel": true,
"self": 0.0008039349997943646,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002050478000228395,
"count": 8,
"is_parallel": true,
"self": 0.002050478000228395
}
}
},
"UnityEnvironment.step": {
"total": 0.05466871099997661,
"count": 1,
"is_parallel": true,
"self": 0.0006148120000943891,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005015569997794955,
"count": 1,
"is_parallel": true,
"self": 0.0005015569997794955
},
"communicator.exchange": {
"total": 0.05074784300018109,
"count": 1,
"is_parallel": true,
"self": 0.05074784300018109
},
"steps_from_proto": {
"total": 0.002804498999921634,
"count": 1,
"is_parallel": true,
"self": 0.0003770220000660629,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002427476999855571,
"count": 8,
"is_parallel": true,
"self": 0.002427476999855571
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1277.664409140968,
"count": 63290,
"is_parallel": true,
"self": 35.04196068901683,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.62137261002772,
"count": 63290,
"is_parallel": true,
"self": 23.62137261002772
},
"communicator.exchange": {
"total": 1110.186922530964,
"count": 63290,
"is_parallel": true,
"self": 1110.186922530964
},
"steps_from_proto": {
"total": 108.81415331095945,
"count": 63290,
"is_parallel": true,
"self": 21.403086217943383,
"children": {
"_process_rank_one_or_two_observation": {
"total": 87.41106709301607,
"count": 506320,
"is_parallel": true,
"self": 87.41106709301607
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 685.4946009539915,
"count": 63291,
"self": 2.697104438060478,
"children": {
"process_trajectory": {
"total": 113.75691407293039,
"count": 63291,
"self": 113.4326793249304,
"children": {
"RLTrainer._checkpoint": {
"total": 0.32423474799998075,
"count": 2,
"self": 0.32423474799998075
}
}
},
"_update_policy": {
"total": 569.0405824430006,
"count": 438,
"self": 373.0844591689656,
"children": {
"TorchPPOOptimizer.update": {
"total": 195.956123274035,
"count": 22818,
"self": 195.956123274035
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3839999155607074e-06,
"count": 1,
"self": 1.3839999155607074e-06
},
"TrainerController._save_models": {
"total": 0.14953751800021564,
"count": 1,
"self": 0.0018929850002677995,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14764453299994784,
"count": 1,
"self": 0.14764453299994784
}
}
}
}
}
}
}