{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.36168980598449707,
"min": 0.332235723733902,
"max": 1.3537042140960693,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10781.25,
"min": 9866.072265625,
"max": 41065.97265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989960.0,
"min": 29952.0,
"max": 989960.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989960.0,
"min": 29952.0,
"max": 989960.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3709689676761627,
"min": -0.08730626106262207,
"max": 0.3955533504486084,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 98.30677795410156,
"min": -20.953502655029297,
"max": 106.40385437011719,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03553295508027077,
"min": 0.0022034409921616316,
"max": 0.3950938880443573,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.41623306274414,
"min": 0.5486568212509155,
"max": 95.2176284790039,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07085979027102614,
"min": 0.06470947051927475,
"max": 0.07338828907687235,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9920370637943658,
"min": 0.5137180235381065,
"max": 1.0800788726779749,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013889445968656914,
"min": 0.0009313894743617715,
"max": 0.014136260964894768,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19445224356119678,
"min": 0.012108063166703029,
"max": 0.21204391447342152,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.379283254557141e-06,
"min": 7.379283254557141e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010330996556379997,
"min": 0.00010330996556379997,
"max": 0.0036339835886722,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245972857142857,
"min": 0.10245972857142857,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4344362,
"min": 1.3886848,
"max": 2.6113278,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025572688428571427,
"min": 0.00025572688428571427,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00358017638,
"min": 0.00358017638,
"max": 0.12115164721999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.016356972977519035,
"min": 0.016356972977519035,
"max": 0.6327592134475708,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2289976328611374,
"min": 0.2289976328611374,
"max": 4.429314613342285,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 473.1587301587302,
"min": 459.8840579710145,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29809.0,
"min": 15984.0,
"max": 33729.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3045523603047644,
"min": -1.0000000521540642,
"max": 1.4239133098473151,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 82.18679869920015,
"min": -29.23980161547661,
"max": 92.26279878616333,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3045523603047644,
"min": -1.0000000521540642,
"max": 1.4239133098473151,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 82.18679869920015,
"min": -29.23980161547661,
"max": 92.26279878616333,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08033744213707184,
"min": 0.08033744213707184,
"max": 12.267523353919387,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.0612588546355255,
"min": 5.0612588546355255,
"max": 196.2803736627102,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1715773845",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1715777300"
},
"total": 3454.290163313,
"count": 1,
"self": 0.6893238699999529,
"children": {
"run_training.setup": {
"total": 0.06619674100011252,
"count": 1,
"self": 0.06619674100011252
},
"TrainerController.start_learning": {
"total": 3453.534642702,
"count": 1,
"self": 2.6274057308264673,
"children": {
"TrainerController._reset_env": {
"total": 2.9811125359997277,
"count": 1,
"self": 2.9811125359997277
},
"TrainerController.advance": {
"total": 3447.8184494131747,
"count": 63537,
"self": 2.9185468542705166,
"children": {
"env_step": {
"total": 2295.2558236209798,
"count": 63537,
"self": 2114.3017874949896,
"children": {
"SubprocessEnvManager._take_step": {
"total": 179.26950235089816,
"count": 63537,
"self": 8.102439216821494,
"children": {
"TorchPolicy.evaluate": {
"total": 171.16706313407667,
"count": 62566,
"self": 171.16706313407667
}
}
},
"workers": {
"total": 1.6845337750919498,
"count": 63537,
"self": 0.0,
"children": {
"worker_root": {
"total": 3445.579147448971,
"count": 63537,
"is_parallel": true,
"self": 1546.2820195639306,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0035194830002183153,
"count": 1,
"is_parallel": true,
"self": 0.0011272279998593149,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023922550003590004,
"count": 8,
"is_parallel": true,
"self": 0.0023922550003590004
}
}
},
"UnityEnvironment.step": {
"total": 0.14050427100028173,
"count": 1,
"is_parallel": true,
"self": 0.0008534260000487848,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008023230002436321,
"count": 1,
"is_parallel": true,
"self": 0.0008023230002436321
},
"communicator.exchange": {
"total": 0.13675644499971895,
"count": 1,
"is_parallel": true,
"self": 0.13675644499971895
},
"steps_from_proto": {
"total": 0.002092077000270365,
"count": 1,
"is_parallel": true,
"self": 0.0004561310001918173,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016359460000785475,
"count": 8,
"is_parallel": true,
"self": 0.0016359460000785475
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1899.2971278850405,
"count": 63536,
"is_parallel": true,
"self": 55.86770819687035,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 33.13581512909968,
"count": 63536,
"is_parallel": true,
"self": 33.13581512909968
},
"communicator.exchange": {
"total": 1673.8121784120885,
"count": 63536,
"is_parallel": true,
"self": 1673.8121784120885
},
"steps_from_proto": {
"total": 136.4814261469819,
"count": 63536,
"is_parallel": true,
"self": 30.82965453187444,
"children": {
"_process_rank_one_or_two_observation": {
"total": 105.65177161510746,
"count": 508288,
"is_parallel": true,
"self": 105.65177161510746
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1149.6440789379244,
"count": 63537,
"self": 5.521701177053728,
"children": {
"process_trajectory": {
"total": 183.8459870478746,
"count": 63537,
"self": 183.6289911318745,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21699591600008716,
"count": 2,
"self": 0.21699591600008716
}
}
},
"_update_policy": {
"total": 960.2763907129961,
"count": 456,
"self": 384.4068423490271,
"children": {
"TorchPPOOptimizer.update": {
"total": 575.869548363969,
"count": 22791,
"self": 575.869548363969
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.710999640636146e-06,
"count": 1,
"self": 1.710999640636146e-06
},
"TrainerController._save_models": {
"total": 0.10767331099941657,
"count": 1,
"self": 0.0025537079991408973,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10511960300027567,
"count": 1,
"self": 0.10511960300027567
}
}
}
}
}
}
}