{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2759353518486023,
"min": 0.2738838791847229,
"max": 1.4007776975631714,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8322.2099609375,
"min": 8190.2236328125,
"max": 42493.9921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989963.0,
"min": 29952.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989963.0,
"min": 29952.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5887129902839661,
"min": -0.08538976311683655,
"max": 0.5887129902839661,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 166.60577392578125,
"min": -20.664321899414062,
"max": 166.60577392578125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.004859808832406998,
"min": 0.002310534007847309,
"max": 0.5792493224143982,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.3753259181976318,
"min": 0.6169126033782959,
"max": 137.28208923339844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06883131388493478,
"min": 0.06580849104042912,
"max": 0.07360820248351227,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9636383943890869,
"min": 0.5152574173845859,
"max": 1.082100816548108,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015726315898777387,
"min": 0.0012831392523557884,
"max": 0.016817789304537957,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22016842258288344,
"min": 0.015397671028269461,
"max": 0.23544905026353138,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.369554686371431e-06,
"min": 7.369554686371431e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010317376560920003,
"min": 0.00010317376560920003,
"max": 0.0036345745884752002,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245648571428571,
"min": 0.10245648571428571,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4343908,
"min": 1.3886848,
"max": 2.6115248,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002554029228571429,
"min": 0.0002554029228571429,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035756409200000012,
"min": 0.0035756409200000012,
"max": 0.12117132752,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013257576152682304,
"min": 0.013257576152682304,
"max": 0.5651046633720398,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18560606241226196,
"min": 0.18560606241226196,
"max": 3.955732583999634,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 324.6923076923077,
"min": 324.6923076923077,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29547.0,
"min": 15984.0,
"max": 32458.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6093494349292345,
"min": -1.0000000521540642,
"max": 1.6093494349292345,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 146.45079857856035,
"min": -28.488801673054695,
"max": 146.45079857856035,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6093494349292345,
"min": -1.0000000521540642,
"max": 1.6093494349292345,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 146.45079857856035,
"min": -28.488801673054695,
"max": 146.45079857856035,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.043969357678229196,
"min": 0.043969357678229196,
"max": 11.398890811018646,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.001211548718857,
"min": 4.001211548718857,
"max": 182.38225297629833,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690507580",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690509742"
},
"total": 2161.773462022,
"count": 1,
"self": 0.5894257859999925,
"children": {
"run_training.setup": {
"total": 0.03840401999991627,
"count": 1,
"self": 0.03840401999991627
},
"TrainerController.start_learning": {
"total": 2161.145632216,
"count": 1,
"self": 1.353444738962935,
"children": {
"TrainerController._reset_env": {
"total": 4.012786652000159,
"count": 1,
"self": 4.012786652000159
},
"TrainerController.advance": {
"total": 2155.680792736037,
"count": 63907,
"self": 1.3609897580608958,
"children": {
"env_step": {
"total": 1508.8510754899412,
"count": 63907,
"self": 1402.4264663779663,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.6250980929492,
"count": 63907,
"self": 4.540409812962707,
"children": {
"TorchPolicy.evaluate": {
"total": 101.0846882799865,
"count": 62553,
"self": 101.0846882799865
}
}
},
"workers": {
"total": 0.7995110190256582,
"count": 63907,
"self": 0.0,
"children": {
"worker_root": {
"total": 2156.31494363302,
"count": 63907,
"is_parallel": true,
"self": 864.8662725510624,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0024115359999541397,
"count": 1,
"is_parallel": true,
"self": 0.0006472209997809841,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017643150001731556,
"count": 8,
"is_parallel": true,
"self": 0.0017643150001731556
}
}
},
"UnityEnvironment.step": {
"total": 0.04694311900016146,
"count": 1,
"is_parallel": true,
"self": 0.0005710440000257222,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046995100001367973,
"count": 1,
"is_parallel": true,
"self": 0.00046995100001367973
},
"communicator.exchange": {
"total": 0.044112559000041074,
"count": 1,
"is_parallel": true,
"self": 0.044112559000041074
},
"steps_from_proto": {
"total": 0.0017895650000809837,
"count": 1,
"is_parallel": true,
"self": 0.0003824209995855199,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014071440004954638,
"count": 8,
"is_parallel": true,
"self": 0.0014071440004954638
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1291.4486710819576,
"count": 63906,
"is_parallel": true,
"self": 33.63725145086846,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.566707468992263,
"count": 63906,
"is_parallel": true,
"self": 22.566707468992263
},
"communicator.exchange": {
"total": 1135.991504547101,
"count": 63906,
"is_parallel": true,
"self": 1135.991504547101
},
"steps_from_proto": {
"total": 99.25320761499597,
"count": 63906,
"is_parallel": true,
"self": 19.912108079087375,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.3410995359086,
"count": 511248,
"is_parallel": true,
"self": 79.3410995359086
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 645.4687274880348,
"count": 63907,
"self": 2.5904591120852274,
"children": {
"process_trajectory": {
"total": 106.07763099494605,
"count": 63907,
"self": 105.81393973294553,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2636912620005205,
"count": 2,
"self": 0.2636912620005205
}
}
},
"_update_policy": {
"total": 536.8006373810035,
"count": 457,
"self": 349.0846701809969,
"children": {
"TorchPPOOptimizer.update": {
"total": 187.71596720000662,
"count": 22782,
"self": 187.71596720000662
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.310001587437e-07,
"count": 1,
"self": 9.310001587437e-07
},
"TrainerController._save_models": {
"total": 0.09860715800004982,
"count": 1,
"self": 0.0016611959999863757,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09694596200006345,
"count": 1,
"self": 0.09694596200006345
}
}
}
}
}
}
}