ppo-PyramidsRND / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2330177128314972,
"min": 0.2330177128314972,
"max": 1.5512512922286987,
"count": 68
},
"Pyramids.Policy.Entropy.sum": {
"value": 6964.43359375,
"min": 6964.43359375,
"max": 47058.7578125,
"count": 68
},
"Pyramids.Step.mean": {
"value": 2039894.0,
"min": 29952.0,
"max": 2039894.0,
"count": 68
},
"Pyramids.Step.sum": {
"value": 2039894.0,
"min": 29952.0,
"max": 2039894.0,
"count": 68
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6805641651153564,
"min": -0.12414652854204178,
"max": 0.7525332570075989,
"count": 68
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 194.641357421875,
"min": -29.795166015625,
"max": 220.49224853515625,
"count": 68
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.019790425896644592,
"min": -0.0011523724533617496,
"max": 0.5536867380142212,
"count": 68
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.660061836242676,
"min": -0.285788357257843,
"max": 131.2237548828125,
"count": 68
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06615853581320087,
"min": 0.06239686218068833,
"max": 0.07503146900815112,
"count": 68
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9262195013848122,
"min": 0.48613608605797065,
"max": 1.1066972581480228,
"count": 68
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011956495561093062,
"min": 0.00043928236265134633,
"max": 0.014656954068816577,
"count": 68
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16739093785530287,
"min": 0.003074976538559424,
"max": 0.215170973877927,
"count": 68
},
"Pyramids.Policy.LearningRate.mean": {
"value": 9.755724605236905e-05,
"min": 9.755724605236905e-05,
"max": 0.00029838354339596195,
"count": 68
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0013658014447331666,
"min": 0.0013658014447331666,
"max": 0.003969015376994899,
"count": 68
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.13251905952380955,
"min": 0.13251905952380955,
"max": 0.19946118095238097,
"count": 68
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.8552668333333335,
"min": 1.3757013333333334,
"max": 2.7374533000000003,
"count": 68
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0032586540464285716,
"min": 0.0032586540464285716,
"max": 0.009946171977142856,
"count": 68
},
"Pyramids.Policy.Beta.sum": {
"value": 0.04562115665,
"min": 0.04562115665,
"max": 0.13230820949000002,
"count": 68
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.004568756558001041,
"min": 0.004568756558001041,
"max": 0.36913421750068665,
"count": 68
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.06396259367465973,
"min": 0.06396259367465973,
"max": 2.583939552307129,
"count": 68
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 299.95283018867923,
"min": 271.7567567567568,
"max": 999.0,
"count": 68
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31795.0,
"min": 15984.0,
"max": 33439.0,
"count": 68
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6450373704466865,
"min": -1.0000000521540642,
"max": 1.7113711185676534,
"count": 68
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 176.01899863779545,
"min": -32.000001668930054,
"max": 188.01859804987907,
"count": 68
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6450373704466865,
"min": -1.0000000521540642,
"max": 1.7113711185676534,
"count": 68
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 176.01899863779545,
"min": -32.000001668930054,
"max": 188.01859804987907,
"count": 68
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.014316193433859273,
"min": 0.01361969543676506,
"max": 7.536140745505691,
"count": 68
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.5318326974229421,
"min": 1.3693665615282953,
"max": 120.57825192809105,
"count": 68
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 68
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 68
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704550582",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704555306"
},
"total": 4724.239417803,
"count": 1,
"self": 0.773212393999529,
"children": {
"run_training.setup": {
"total": 0.052989432000003944,
"count": 1,
"self": 0.052989432000003944
},
"TrainerController.start_learning": {
"total": 4723.413215977001,
"count": 1,
"self": 3.0437041291388596,
"children": {
"TrainerController._reset_env": {
"total": 3.800391533000038,
"count": 1,
"self": 3.800391533000038
},
"TrainerController.advance": {
"total": 4716.394457441862,
"count": 131675,
"self": 3.095794261955234,
"children": {
"env_step": {
"total": 3376.7390581370782,
"count": 131675,
"self": 3101.5926492621015,
"children": {
"SubprocessEnvManager._take_step": {
"total": 273.3280973479481,
"count": 131675,
"self": 10.053644099019493,
"children": {
"TorchPolicy.evaluate": {
"total": 263.2744532489286,
"count": 128232,
"self": 263.2744532489286
}
}
},
"workers": {
"total": 1.818311527028527,
"count": 131675,
"self": 0.0,
"children": {
"worker_root": {
"total": 4711.869233105034,
"count": 131675,
"is_parallel": true,
"self": 1863.936440574972,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0039018779999651088,
"count": 1,
"is_parallel": true,
"self": 0.0027550999999448322,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011467780000202765,
"count": 8,
"is_parallel": true,
"self": 0.0011467780000202765
}
}
},
"UnityEnvironment.step": {
"total": 0.0823522859999457,
"count": 1,
"is_parallel": true,
"self": 0.0006416300000182673,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005244599999514321,
"count": 1,
"is_parallel": true,
"self": 0.0005244599999514321
},
"communicator.exchange": {
"total": 0.07933476699997755,
"count": 1,
"is_parallel": true,
"self": 0.07933476699997755
},
"steps_from_proto": {
"total": 0.0018514289999984612,
"count": 1,
"is_parallel": true,
"self": 0.0003998850000925813,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00145154399990588,
"count": 8,
"is_parallel": true,
"self": 0.00145154399990588
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2847.932792530062,
"count": 131674,
"is_parallel": true,
"self": 75.42222613039667,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 51.01209129894585,
"count": 131674,
"is_parallel": true,
"self": 51.01209129894585
},
"communicator.exchange": {
"total": 2509.1681420298337,
"count": 131674,
"is_parallel": true,
"self": 2509.1681420298337
},
"steps_from_proto": {
"total": 212.3303330708859,
"count": 131674,
"is_parallel": true,
"self": 42.927148922235574,
"children": {
"_process_rank_one_or_two_observation": {
"total": 169.40318414865033,
"count": 1053392,
"is_parallel": true,
"self": 169.40318414865033
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1336.5596050428283,
"count": 131675,
"self": 5.902707291743809,
"children": {
"process_trajectory": {
"total": 269.95778163809155,
"count": 131675,
"self": 269.58117030009157,
"children": {
"RLTrainer._checkpoint": {
"total": 0.37661133799997515,
"count": 4,
"self": 0.37661133799997515
}
}
},
"_update_policy": {
"total": 1060.699116112993,
"count": 936,
"self": 629.559274961939,
"children": {
"TorchPPOOptimizer.update": {
"total": 431.13984115105393,
"count": 46765,
"self": 431.13984115105393
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1610000001383014e-06,
"count": 1,
"self": 1.1610000001383014e-06
},
"TrainerController._save_models": {
"total": 0.17466171199976088,
"count": 1,
"self": 0.0020275679999031126,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17263414399985777,
"count": 1,
"self": 0.17263414399985777
}
}
}
}
}
}
}
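
The file above is the raw timer/gauge log that ML-Agents writes at the end of a run. As a minimal sketch of how one might inspect it (the local path "run_logs/timers.json" and the summary printed are assumptions for illustration, not part of the log itself), plain Python is enough to list the gauges and walk the timer tree:

import json

# Assumed local path to this file; adjust to wherever the run_logs live.
with open("run_logs/timers.json") as f:
    root = json.load(f)

# Each gauge stores the latest value plus min/max/count over the run.
for name, stats in root["gauges"].items():
    print(f"{name}: value={stats['value']:.4g} "
          f"(min={stats['min']:.4g}, max={stats['max']:.4g}, count={stats['count']})")

# The timer tree is hierarchical: "total" is wall-clock seconds spent in a
# block, "count" is how many times it ran, "children" nests sub-timers.
def walk(name, node, depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s x{node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk("root", root)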