{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4866746962070465,
"min": 0.4819784462451935,
"max": 1.4704461097717285,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14475.65234375,
"min": 14475.65234375,
"max": 44607.453125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989952.0,
"min": 29952.0,
"max": 989952.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989952.0,
"min": 29952.0,
"max": 989952.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4255790710449219,
"min": -0.1660817712545395,
"max": 0.483580619096756,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 112.77845001220703,
"min": -39.36138153076172,
"max": 129.599609375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.009613361209630966,
"min": -0.009613361209630966,
"max": 0.4051348865032196,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.5475406646728516,
"min": -2.5475406646728516,
"max": 96.0169677734375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07023399615995697,
"min": 0.06478494070470334,
"max": 0.0744606949231967,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9832759462393975,
"min": 0.45733628309520513,
"max": 1.0424497289247536,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013792137313603667,
"min": 0.0007647788233143253,
"max": 0.01483539521692716,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19308992239045134,
"min": 0.010706903526400554,
"max": 0.20769553303698024,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.629890313878567e-06,
"min": 7.629890313878567e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010681846439429994,
"min": 0.00010681846439429994,
"max": 0.0036327121890960003,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025432642857143,
"min": 0.1025432642857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356057000000002,
"min": 1.3886848,
"max": 2.610904,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002640721021428571,
"min": 0.0002640721021428571,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036970094299999994,
"min": 0.0036970094299999994,
"max": 0.1211093096,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010788388550281525,
"min": 0.010788388550281525,
"max": 0.46336987614631653,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15103743970394135,
"min": 0.15103743970394135,
"max": 3.243589162826538,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 420.83561643835617,
"min": 389.6625,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30721.0,
"min": 15984.0,
"max": 33115.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3815971946136818,
"min": -1.0000000521540642,
"max": 1.5290675501565676,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 99.4749980121851,
"min": -29.14700174331665,
"max": 113.19119822978973,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3815971946136818,
"min": -1.0000000521540642,
"max": 1.5290675501565676,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 99.4749980121851,
"min": -29.14700174331665,
"max": 113.19119822978973,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04753646299327051,
"min": 0.04753646299327051,
"max": 8.934927144087851,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.4226253355154768,
"min": 3.4226253355154768,
"max": 142.95883430540562,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1732984396",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1732987013"
},
"total": 2617.1157349419996,
"count": 1,
"self": 0.5266976519997115,
"children": {
"run_training.setup": {
"total": 0.06800029999976687,
"count": 1,
"self": 0.06800029999976687
},
"TrainerController.start_learning": {
"total": 2616.52103699,
"count": 1,
"self": 1.985749643987674,
"children": {
"TrainerController._reset_env": {
"total": 2.6431613049999214,
"count": 1,
"self": 2.6431613049999214
},
"TrainerController.advance": {
"total": 2611.8042161170133,
"count": 63664,
"self": 1.9591416860143909,
"children": {
"env_step": {
"total": 1841.454075376043,
"count": 63664,
"self": 1651.2710671001705,
"children": {
"SubprocessEnvManager._take_step": {
"total": 189.0558243478995,
"count": 63664,
"self": 5.7667154839714385,
"children": {
"TorchPolicy.evaluate": {
"total": 183.28910886392805,
"count": 62558,
"self": 183.28910886392805
}
}
},
"workers": {
"total": 1.1271839279729647,
"count": 63664,
"self": 0.0,
"children": {
"worker_root": {
"total": 2610.3318086820123,
"count": 63664,
"is_parallel": true,
"self": 1106.3666766211027,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025634969997554435,
"count": 1,
"is_parallel": true,
"self": 0.0008216629998969438,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017418339998584997,
"count": 8,
"is_parallel": true,
"self": 0.0017418339998584997
}
}
},
"UnityEnvironment.step": {
"total": 0.11687405500015302,
"count": 1,
"is_parallel": true,
"self": 0.0007762019995425362,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005951360003564332,
"count": 1,
"is_parallel": true,
"self": 0.0005951360003564332
},
"communicator.exchange": {
"total": 0.11346911100008583,
"count": 1,
"is_parallel": true,
"self": 0.11346911100008583
},
"steps_from_proto": {
"total": 0.00203360600016822,
"count": 1,
"is_parallel": true,
"self": 0.00044513300053949933,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015884729996287206,
"count": 8,
"is_parallel": true,
"self": 0.0015884729996287206
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1503.9651320609096,
"count": 63663,
"is_parallel": true,
"self": 40.22273428909784,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.219916052012195,
"count": 63663,
"is_parallel": true,
"self": 27.219916052012195
},
"communicator.exchange": {
"total": 1318.250295694967,
"count": 63663,
"is_parallel": true,
"self": 1318.250295694967
},
"steps_from_proto": {
"total": 118.27218602483254,
"count": 63663,
"is_parallel": true,
"self": 25.872104360689264,
"children": {
"_process_rank_one_or_two_observation": {
"total": 92.40008166414327,
"count": 509304,
"is_parallel": true,
"self": 92.40008166414327
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 768.390999054956,
"count": 63664,
"self": 3.5292332749422712,
"children": {
"process_trajectory": {
"total": 152.5426576420141,
"count": 63664,
"self": 152.30593538701442,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2367222549996768,
"count": 2,
"self": 0.2367222549996768
}
}
},
"_update_policy": {
"total": 612.3191081379996,
"count": 452,
"self": 343.44890062799277,
"children": {
"TorchPPOOptimizer.update": {
"total": 268.8702075100068,
"count": 22800,
"self": 268.8702075100068
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.739997040014714e-07,
"count": 1,
"self": 9.739997040014714e-07
},
"TrainerController._save_models": {
"total": 0.0879089499994734,
"count": 1,
"self": 0.001435447999938333,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08647350199953507,
"count": 1,
"self": 0.08647350199953507
}
}
}
}
}
}
}