{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3553622364997864,
"min": 0.3292371928691864,
"max": 1.3780255317687988,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10768.8974609375,
"min": 9882.3837890625,
"max": 41803.78125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989983.0,
"min": 29915.0,
"max": 989983.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989983.0,
"min": 29915.0,
"max": 989983.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.701176643371582,
"min": -0.08960031718015671,
"max": 0.7213871479034424,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 206.84710693359375,
"min": -21.683277130126953,
"max": 213.5305938720703,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0086286012083292,
"min": -0.005355819594115019,
"max": 0.4184195101261139,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.5454373359680176,
"min": -1.4996294975280762,
"max": 99.16542053222656,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07156671694703583,
"min": 0.06400450659807053,
"max": 0.07385286327860063,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0019340372585015,
"min": 0.5054248289334596,
"max": 1.0582794581783033,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01801811614307296,
"min": 0.00043296472886908893,
"max": 0.01801811614307296,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2522536260030214,
"min": 0.005628541475298156,
"max": 0.2522536260030214,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.729275995035714e-06,
"min": 7.729275995035714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010820986393050001,
"min": 0.00010820986393050001,
"max": 0.003632241189253,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257639285714286,
"min": 0.10257639285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4360695,
"min": 1.3886848,
"max": 2.6107470000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026738164642857144,
"min": 0.00026738164642857144,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00374334305,
"min": 0.00374334305,
"max": 0.12109362530000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010969838127493858,
"min": 0.010969838127493858,
"max": 0.45114240050315857,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15357773005962372,
"min": 0.15357773005962372,
"max": 3.157996892929077,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 267.66386554621846,
"min": 256.73275862068965,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31852.0,
"min": 16842.0,
"max": 33011.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6992520813931937,
"min": -0.9999871489501768,
"max": 1.7406880605384845,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 202.21099768579006,
"min": -30.999601617455482,
"max": 202.21099768579006,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6992520813931937,
"min": -0.9999871489501768,
"max": 1.7406880605384845,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 202.21099768579006,
"min": -30.999601617455482,
"max": 202.21099768579006,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.030625398024687243,
"min": 0.029938262647002653,
"max": 9.25762802362442,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.644422364937782,
"min": 3.442900204405305,
"max": 157.37967640161514,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1709060549",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1709062759"
},
"total": 2210.2115979,
"count": 1,
"self": 0.9010030470003585,
"children": {
"run_training.setup": {
"total": 0.051689949999854434,
"count": 1,
"self": 0.051689949999854434
},
"TrainerController.start_learning": {
"total": 2209.258904903,
"count": 1,
"self": 1.3288486260166792,
"children": {
"TrainerController._reset_env": {
"total": 2.4321096779999607,
"count": 1,
"self": 2.4321096779999607
},
"TrainerController.advance": {
"total": 2205.384784044983,
"count": 64170,
"self": 1.4252789349511659,
"children": {
"env_step": {
"total": 1582.5691336279178,
"count": 64170,
"self": 1458.170665863824,
"children": {
"SubprocessEnvManager._take_step": {
"total": 123.59485966600369,
"count": 64170,
"self": 4.586933456974293,
"children": {
"TorchPolicy.evaluate": {
"total": 119.00792620902939,
"count": 62576,
"self": 119.00792620902939
}
}
},
"workers": {
"total": 0.8036080980900806,
"count": 64170,
"self": 0.0,
"children": {
"worker_root": {
"total": 2204.2312615840156,
"count": 64170,
"is_parallel": true,
"self": 857.9710342650251,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021462300001076073,
"count": 1,
"is_parallel": true,
"self": 0.0006898850006109569,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014563449994966504,
"count": 8,
"is_parallel": true,
"self": 0.0014563449994966504
}
}
},
"UnityEnvironment.step": {
"total": 0.04850034900027822,
"count": 1,
"is_parallel": true,
"self": 0.0005753730001742952,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005049950000284298,
"count": 1,
"is_parallel": true,
"self": 0.0005049950000284298
},
"communicator.exchange": {
"total": 0.04578265999998621,
"count": 1,
"is_parallel": true,
"self": 0.04578265999998621
},
"steps_from_proto": {
"total": 0.0016373210000892868,
"count": 1,
"is_parallel": true,
"self": 0.0003382970007805852,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012990239993087016,
"count": 8,
"is_parallel": true,
"self": 0.0012990239993087016
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1346.2602273189905,
"count": 64169,
"is_parallel": true,
"self": 34.70479730099305,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.104658128037954,
"count": 64169,
"is_parallel": true,
"self": 24.104658128037954
},
"communicator.exchange": {
"total": 1190.4014419869427,
"count": 64169,
"is_parallel": true,
"self": 1190.4014419869427
},
"steps_from_proto": {
"total": 97.04932990301677,
"count": 64169,
"is_parallel": true,
"self": 18.97622890274306,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.07310100027371,
"count": 513352,
"is_parallel": true,
"self": 78.07310100027371
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 621.390371482114,
"count": 64170,
"self": 2.5299614221135016,
"children": {
"process_trajectory": {
"total": 125.6553079109949,
"count": 64170,
"self": 125.42975579299491,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22555211799999597,
"count": 2,
"self": 0.22555211799999597
}
}
},
"_update_policy": {
"total": 493.2051021490056,
"count": 454,
"self": 288.62239076202013,
"children": {
"TorchPPOOptimizer.update": {
"total": 204.58271138698547,
"count": 22824,
"self": 204.58271138698547
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2840000636060722e-06,
"count": 1,
"self": 1.2840000636060722e-06
},
"TrainerController._save_models": {
"total": 0.11316127000009146,
"count": 1,
"self": 0.0021574350002993015,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11100383499979216,
"count": 1,
"self": 0.11100383499979216
}
}
}
}
}
}
}