{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.21478991210460663,
"min": 0.20587411522865295,
"max": 0.6635094881057739,
"count": 67
},
"Pyramids.Policy.Entropy.sum": {
"value": 6319.978515625,
"min": 6199.28125,
"max": 19681.1171875,
"count": 67
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 202.09923664122138,
"min": 170.33333333333334,
"max": 324.7980769230769,
"count": 67
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 26475.0,
"min": 7154.0,
"max": 34209.0,
"count": 67
},
"Pyramids.Step.mean": {
"value": 2999953.0,
"min": 1019985.0,
"max": 2999953.0,
"count": 67
},
"Pyramids.Step.sum": {
"value": 2999953.0,
"min": 1019985.0,
"max": 2999953.0,
"count": 67
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8460010290145874,
"min": 0.3994930684566498,
"max": 0.8886299133300781,
"count": 67
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 256.33831787109375,
"min": 69.91128540039062,
"max": 272.80938720703125,
"count": 67
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007474419195204973,
"min": -0.0037378580309450626,
"max": 0.14753714203834534,
"count": 67
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.264749050140381,
"min": -1.1325709819793701,
"max": 41.162864685058594,
"count": 67
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.781787680891844,
"min": 1.5963942678911345,
"max": 1.8296666578167962,
"count": 67
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 231.6323985159397,
"min": 76.84599962830544,
"max": 271.7899979650974,
"count": 67
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.781787680891844,
"min": 1.5963942678911345,
"max": 1.8296666578167962,
"count": 67
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 231.6323985159397,
"min": 76.84599962830544,
"max": 271.7899979650974,
"count": 67
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.01764767567085353,
"min": 0.01764767567085353,
"max": 0.0433384377005831,
"count": 67
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.294197837210959,
"min": 1.0528611817280762,
"max": 4.4638590831600595,
"count": 67
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07035031930100792,
"min": 0.06295160259208538,
"max": 0.07553702230152416,
"count": 67
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.984904470214111,
"min": 0.49356065462367016,
"max": 1.0765698625045306,
"count": 67
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015417234234564137,
"min": 0.010701739972919974,
"max": 0.01728356775570527,
"count": 67
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21584127928389793,
"min": 0.07491217981043982,
"max": 0.259253516335579,
"count": 67
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5107137821761932e-06,
"min": 1.5107137821761932e-06,
"max": 0.00019898423367193334,
"count": 67
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.1149992950466706e-05,
"min": 2.1149992950466706e-05,
"max": 0.0027086627971125666,
"count": 67
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10050353809523813,
"min": 0.10050353809523813,
"max": 0.1663280666666667,
"count": 67
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4070495333333337,
"min": 1.1642964666666669,
"max": 2.3028874333333333,
"count": 67
},
"Pyramids.Policy.Beta.mean": {
"value": 6.0303455714285815e-05,
"min": 6.0303455714285815e-05,
"max": 0.00663617386,
"count": 67
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008442483800000014,
"min": 0.0008442483800000014,
"max": 0.09033845459,
"count": 67
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008308643475174904,
"min": 0.008308643475174904,
"max": 0.01302468590438366,
"count": 67
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11632100492715836,
"min": 0.09067335724830627,
"max": 0.18112194538116455,
"count": 67
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 67
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 67
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682280256",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:39:03) \n[GCC 11.3.0]",
"command_line_arguments": "/home/byron/miniconda3/envs/torch/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./ml-agents/training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids-Training2 --no-graphics --torch-device=cuda:0 --num-envs 8 --num-areas 8 --resume --env-args -batchmode",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cu117",
"numpy_version": "1.21.2",
"end_time_seconds": "1682282200"
},
"total": 1943.9294684159977,
"count": 1,
"self": 0.6761621149926214,
"children": {
"run_training.setup": {
"total": 0.028145499003585428,
"count": 1,
"self": 0.028145499003585428
},
"TrainerController.start_learning": {
"total": 1943.2251608020015,
"count": 1,
"self": 1.474775712544215,
"children": {
"TrainerController._reset_env": {
"total": 13.508077049002168,
"count": 1,
"self": 13.508077049002168
},
"TrainerController.advance": {
"total": 1928.159975061455,
"count": 42957,
"self": 1.15854264931113,
"children": {
"env_step": {
"total": 477.5357708391384,
"count": 42957,
"self": 131.00419527236954,
"children": {
"SubprocessEnvManager._take_step": {
"total": 345.3808232630254,
"count": 131887,
"self": 8.298781538120238,
"children": {
"TorchPolicy.evaluate": {
"total": 337.08204172490514,
"count": 125436,
"self": 337.08204172490514
}
}
},
"workers": {
"total": 1.1507523037435021,
"count": 42957,
"self": 0.0,
"children": {
"worker_root": {
"total": 15537.698560449106,
"count": 131883,
"is_parallel": true,
"self": 13232.392022320244,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00879246297699865,
"count": 8,
"is_parallel": true,
"self": 0.0027776409406214952,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006014822036377154,
"count": 64,
"is_parallel": true,
"self": 0.006014822036377154
}
}
},
"UnityEnvironment.step": {
"total": 0.38589809798577335,
"count": 8,
"is_parallel": true,
"self": 0.003574172980734147,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.002722707999055274,
"count": 8,
"is_parallel": true,
"self": 0.002722707999055274
},
"communicator.exchange": {
"total": 0.3683473840064835,
"count": 8,
"is_parallel": true,
"self": 0.3683473840064835
},
"steps_from_proto": {
"total": 0.011253832999500446,
"count": 8,
"is_parallel": true,
"self": 0.003931127008399926,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00732270599110052,
"count": 64,
"is_parallel": true,
"self": 0.00732270599110052
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2305.3065381288616,
"count": 131875,
"is_parallel": true,
"self": 38.64903408114333,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 32.81288378901081,
"count": 131875,
"is_parallel": true,
"self": 32.81288378901081
},
"communicator.exchange": {
"total": 2126.858278217769,
"count": 131875,
"is_parallel": true,
"self": 2126.858278217769
},
"steps_from_proto": {
"total": 106.98634204093833,
"count": 131875,
"is_parallel": true,
"self": 26.78200793697033,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.204334103968,
"count": 1055000,
"is_parallel": true,
"self": 80.204334103968
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1449.4656615730055,
"count": 42957,
"self": 3.6081198034225963,
"children": {
"process_trajectory": {
"total": 269.627192241649,
"count": 42957,
"self": 269.29844516263984,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3287470790091902,
"count": 4,
"self": 0.3287470790091902
}
}
},
"_update_policy": {
"total": 1176.2303495279339,
"count": 935,
"self": 592.5732438604755,
"children": {
"TorchPPOOptimizer.update": {
"total": 583.6571056674584,
"count": 45576,
"self": 583.6571056674584
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.500057108700275e-07,
"count": 1,
"self": 7.500057108700275e-07
},
"TrainerController._save_models": {
"total": 0.08233222899434622,
"count": 1,
"self": 0.0014813790039625019,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08085084999038372,
"count": 1,
"self": 0.08085084999038372
}
}
}
}
}
}
}