ppo-PyramidsRND / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.17482811212539673,
"min": 0.17330242693424225,
"max": 1.4507393836975098,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 5242.0458984375,
"min": 5151.9345703125,
"max": 44009.62890625,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999924.0,
"min": 29912.0,
"max": 2999924.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999924.0,
"min": 29912.0,
"max": 2999924.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8132672905921936,
"min": -0.21639706194400787,
"max": 0.8132672905921936,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 242.35365295410156,
"min": -51.286102294921875,
"max": 242.35365295410156,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.010181977413594723,
"min": -0.025289621204137802,
"max": 0.303748220205307,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.034229278564453,
"min": -7.00522518157959,
"max": 73.20332336425781,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06837959499874463,
"min": 0.06452693229667379,
"max": 0.07303278170533242,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0256939249811694,
"min": 0.4836235476113139,
"max": 1.093776802693302,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0130653168740941,
"min": 0.0005115327173492487,
"max": 0.015645201730637107,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1959797531114115,
"min": 0.005115327173492487,
"max": 0.22900681754040042,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5169661610444478e-06,
"min": 1.5169661610444478e-06,
"max": 0.00029841154338662855,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.2754492415666716e-05,
"min": 2.2754492415666716e-05,
"max": 0.003892477302507633,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10050562222222222,
"min": 0.10050562222222222,
"max": 0.1994705142857143,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5075843333333332,
"min": 1.3962936000000001,
"max": 2.797492366666667,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.05116600000001e-05,
"min": 6.05116600000001e-05,
"max": 0.009947104377142857,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0009076749000000014,
"min": 0.0009076749000000014,
"max": 0.12976948743000002,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.004741171840578318,
"min": 0.004624335560947657,
"max": 0.44086360931396484,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.0711175799369812,
"min": 0.06474069505929947,
"max": 3.086045265197754,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 231.9291338582677,
"min": 231.9291338582677,
"max": 998.78125,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29455.0,
"min": 16711.0,
"max": 33014.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7365653442116233,
"min": -0.9372500504832715,
"max": 1.7610959882736206,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 220.54379871487617,
"min": -29.992001615464687,
"max": 221.33199779689312,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7365653442116233,
"min": -0.9372500504832715,
"max": 1.7610959882736206,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 220.54379871487617,
"min": -29.992001615464687,
"max": 221.33199779689312,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.011310607960136464,
"min": 0.011310607960136464,
"max": 8.401019156855696,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.436447210937331,
"min": 1.4348511838979903,
"max": 142.81732566654682,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1706589002",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1706595885"
},
"total": 6883.719659685,
"count": 1,
"self": 0.5645091610003874,
"children": {
"run_training.setup": {
"total": 0.07568099499997061,
"count": 1,
"self": 0.07568099499997061
},
"TrainerController.start_learning": {
"total": 6883.079469529,
"count": 1,
"self": 4.229469305236307,
"children": {
"TrainerController._reset_env": {
"total": 3.3904160289999936,
"count": 1,
"self": 3.3904160289999936
},
"TrainerController.advance": {
"total": 6875.370577205763,
"count": 193918,
"self": 4.462017299530999,
"children": {
"env_step": {
"total": 4958.729628320185,
"count": 193918,
"self": 4564.427026540247,
"children": {
"SubprocessEnvManager._take_step": {
"total": 391.6837921569845,
"count": 193918,
"self": 14.520287364885917,
"children": {
"TorchPolicy.evaluate": {
"total": 377.1635047920986,
"count": 187549,
"self": 377.1635047920986
}
}
},
"workers": {
"total": 2.6188096229529947,
"count": 193918,
"self": 0.0,
"children": {
"worker_root": {
"total": 6867.824345605776,
"count": 193918,
"is_parallel": true,
"self": 2657.9586808818176,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004735423000056471,
"count": 1,
"is_parallel": true,
"self": 0.0035325910001802185,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012028319998762527,
"count": 8,
"is_parallel": true,
"self": 0.0012028319998762527
}
}
},
"UnityEnvironment.step": {
"total": 0.08721992300002057,
"count": 1,
"is_parallel": true,
"self": 0.0006619289999889588,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004915220000611953,
"count": 1,
"is_parallel": true,
"self": 0.0004915220000611953
},
"communicator.exchange": {
"total": 0.08447703399997408,
"count": 1,
"is_parallel": true,
"self": 0.08447703399997408
},
"steps_from_proto": {
"total": 0.0015894379999963348,
"count": 1,
"is_parallel": true,
"self": 0.00033306799980437063,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012563700001919642,
"count": 8,
"is_parallel": true,
"self": 0.0012563700001919642
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4209.865664723959,
"count": 193917,
"is_parallel": true,
"self": 108.53745355305182,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.26488092700095,
"count": 193917,
"is_parallel": true,
"self": 74.26488092700095
},
"communicator.exchange": {
"total": 3726.0994106950116,
"count": 193917,
"is_parallel": true,
"self": 3726.0994106950116
},
"steps_from_proto": {
"total": 300.96391954889475,
"count": 193917,
"is_parallel": true,
"self": 60.51398081750642,
"children": {
"_process_rank_one_or_two_observation": {
"total": 240.44993873138833,
"count": 1551336,
"is_parallel": true,
"self": 240.44993873138833
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1912.178931586047,
"count": 193918,
"self": 8.843336463985452,
"children": {
"process_trajectory": {
"total": 389.53699774305335,
"count": 193918,
"self": 388.9952704260537,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5417273169996406,
"count": 6,
"self": 0.5417273169996406
}
}
},
"_update_policy": {
"total": 1513.7985973790082,
"count": 1390,
"self": 892.9627504329926,
"children": {
"TorchPPOOptimizer.update": {
"total": 620.8358469460156,
"count": 68442,
"self": 620.8358469460156
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.599998520570807e-07,
"count": 1,
"self": 9.599998520570807e-07
},
"TrainerController._save_models": {
"total": 0.08900602900030208,
"count": 1,
"self": 0.0017166970001198933,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08728933200018218,
"count": 1,
"self": 0.08728933200018218
}
}
}
}
}
}
}