{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.9384244680404663,
"min": 0.9384244680404663,
"max": 1.4979941844940186,
"count": 3
},
"Pyramids.Policy.Entropy.sum": {
"value": 28107.689453125,
"min": 28107.689453125,
"max": 45443.15234375,
"count": 3
},
"Pyramids.Step.mean": {
"value": 89932.0,
"min": 29952.0,
"max": 89932.0,
"count": 3
},
"Pyramids.Step.sum": {
"value": 89932.0,
"min": 29952.0,
"max": 89932.0,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.09006470441818237,
"min": -0.09006470441818237,
"max": -0.05955871567130089,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -21.795658111572266,
"min": -21.795658111572266,
"max": -14.115415573120117,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.18890169262886047,
"min": 0.18890169262886047,
"max": 0.2630091607570648,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 45.714210510253906,
"min": 45.714210510253906,
"max": 63.12220001220703,
"count": 3
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06750146815703881,
"min": 0.06750146815703881,
"max": 0.07170572742287984,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.6075132134133493,
"min": 0.4972169131273738,
"max": 0.6075132134133493,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.001913338231602507,
"min": 0.0006711106492034387,
"max": 0.007311663421230983,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.017220044084422563,
"min": 0.004697774544424071,
"max": 0.05118164394861688,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.353807548733332e-05,
"min": 7.353807548733332e-05,
"max": 0.0002515063018788571,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0006618426793859999,
"min": 0.0006618426793859999,
"max": 0.0017605441131519997,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.12451266666666665,
"min": 0.12451266666666665,
"max": 0.1838354285714286,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.1206139999999998,
"min": 1.0911359999999999,
"max": 1.2868480000000002,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0024588153999999997,
"min": 0.0024588153999999997,
"max": 0.008385159314285713,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0221293386,
"min": 0.0221293386,
"max": 0.058696115199999996,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.1146935522556305,
"min": 0.1146935522556305,
"max": 0.3848760426044464,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.032241940498352,
"min": 1.032241940498352,
"max": 2.6941323280334473,
"count": 3
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 965.0,
"min": 965.0,
"max": 999.0,
"count": 3
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32810.0,
"min": 15984.0,
"max": 32810.0,
"count": 3
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.8482941678341698,
"min": -1.0000000521540642,
"max": -0.8482941678341698,
"count": 3
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -28.84200170636177,
"min": -32.000001668930054,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.8482941678341698,
"min": -1.0000000521540642,
"max": -0.8482941678341698,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -28.84200170636177,
"min": -32.000001668930054,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.282595489612397,
"min": 1.282595489612397,
"max": 7.8693959303200245,
"count": 3
},
"Pyramids.Policy.RndReward.sum": {
"value": 43.6082466468215,
"min": 43.6082466468215,
"max": 125.91033488512039,
"count": 3
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688446931",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688447238"
},
"total": 306.71329099,
"count": 1,
"self": 0.5929661990000454,
"children": {
"run_training.setup": {
"total": 0.07925935099996195,
"count": 1,
"self": 0.07925935099996195
},
"TrainerController.start_learning": {
"total": 306.04106544,
"count": 1,
"self": 0.22349967699477702,
"children": {
"TrainerController._reset_env": {
"total": 2.4367131839999843,
"count": 1,
"self": 2.4367131839999843
},
"TrainerController.advance": {
"total": 303.1775318450052,
"count": 6274,
"self": 0.2226084700089359,
"children": {
"env_step": {
"total": 181.76730748699606,
"count": 6274,
"self": 167.47458686099338,
"children": {
"SubprocessEnvManager._take_step": {
"total": 14.16453502399645,
"count": 6274,
"self": 0.7265861139967456,
"children": {
"TorchPolicy.evaluate": {
"total": 13.437948909999704,
"count": 6273,
"self": 13.437948909999704
}
}
},
"workers": {
"total": 0.12818560200622642,
"count": 6274,
"self": 0.0,
"children": {
"worker_root": {
"total": 305.22412085699875,
"count": 6274,
"is_parallel": true,
"self": 154.28084327300417,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.009090148000041154,
"count": 1,
"is_parallel": true,
"self": 0.00658057300006476,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002509574999976394,
"count": 8,
"is_parallel": true,
"self": 0.002509574999976394
}
}
},
"UnityEnvironment.step": {
"total": 0.1149101880000103,
"count": 1,
"is_parallel": true,
"self": 0.0007011060000650104,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005416999999852123,
"count": 1,
"is_parallel": true,
"self": 0.0005416999999852123
},
"communicator.exchange": {
"total": 0.11118800499997405,
"count": 1,
"is_parallel": true,
"self": 0.11118800499997405
},
"steps_from_proto": {
"total": 0.002479376999986016,
"count": 1,
"is_parallel": true,
"self": 0.0005794629998945311,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001899914000091485,
"count": 8,
"is_parallel": true,
"self": 0.001899914000091485
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 150.9432775839946,
"count": 6273,
"is_parallel": true,
"self": 4.536070188996064,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.499008301996014,
"count": 6273,
"is_parallel": true,
"self": 2.499008301996014
},
"communicator.exchange": {
"total": 129.7867365439971,
"count": 6273,
"is_parallel": true,
"self": 129.7867365439971
},
"steps_from_proto": {
"total": 14.1214625490054,
"count": 6273,
"is_parallel": true,
"self": 2.9101510599992935,
"children": {
"_process_rank_one_or_two_observation": {
"total": 11.211311489006107,
"count": 50184,
"is_parallel": true,
"self": 11.211311489006107
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 121.18761588800021,
"count": 6274,
"self": 0.2707400959995425,
"children": {
"process_trajectory": {
"total": 14.648106904000883,
"count": 6274,
"self": 14.648106904000883
},
"_update_policy": {
"total": 106.26876888799978,
"count": 27,
"self": 44.46207379600054,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.806695091999245,
"count": 2337,
"self": 61.806695091999245
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0620000239214278e-06,
"count": 1,
"self": 1.0620000239214278e-06
},
"TrainerController._save_models": {
"total": 0.20331967200002055,
"count": 1,
"self": 0.0017556590000822325,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2015640129999383,
"count": 1,
"self": 0.2015640129999383
}
}
}
}
}
}
}