TaTo69
First commit
5a8361f
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.12892360985279083,
"min": 0.1215721145272255,
"max": 1.3557578325271606,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 3849.143310546875,
"min": 3647.163330078125,
"max": 41128.26953125,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999922.0,
"min": 29898.0,
"max": 2999922.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999922.0,
"min": 29898.0,
"max": 2999922.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7925044298171997,
"min": -0.09550029039382935,
"max": 0.8931336402893066,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 236.95883178710938,
"min": -23.01556968688965,
"max": 273.17584228515625,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.010758834891021252,
"min": -0.08753448724746704,
"max": 0.386795312166214,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.2168915271759033,
"min": -22.058691024780273,
"max": 91.67048645019531,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06848765789221962,
"min": 0.06350920135193365,
"max": 0.07449852871237643,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9588272104910747,
"min": 0.4744383845218703,
"max": 1.073822987147187,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013585168394788598,
"min": 0.00038379569667890953,
"max": 0.016694437838547554,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19019235752704036,
"min": 0.003454161270110186,
"max": 0.23372212973966575,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4560066575547615e-06,
"min": 1.4560066575547615e-06,
"max": 0.00029841174338656194,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.0384093205766662e-05,
"min": 2.0384093205766662e-05,
"max": 0.0039693416768861334,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10048530238095237,
"min": 0.10048530238095237,
"max": 0.19947058095238096,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4067942333333332,
"min": 1.3962940666666668,
"max": 2.737598366666667,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.848170785714286e-05,
"min": 5.848170785714286e-05,
"max": 0.009947111037142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008187439100000001,
"min": 0.0008187439100000001,
"max": 0.13231907528,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0064279548823833466,
"min": 0.006295084487646818,
"max": 0.48187491297721863,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08999136835336685,
"min": 0.08813118189573288,
"max": 3.373124361038208,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 214.1578947368421,
"min": 195.17880794701986,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28483.0,
"min": 16697.0,
"max": 32536.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.770919383350593,
"min": -0.999962551984936,
"max": 1.7926978290938644,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 237.30319736897945,
"min": -31.998801663517952,
"max": 272.3171975016594,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.770919383350593,
"min": -0.999962551984936,
"max": 1.7926978290938644,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 237.30319736897945,
"min": -31.998801663517952,
"max": 272.3171975016594,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.014109039775277454,
"min": 0.013696996541424696,
"max": 9.172356058569516,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.8906113298871787,
"min": 1.8906113298871787,
"max": 155.93005299568176,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701702007",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701709218"
},
"total": 7211.203173568,
"count": 1,
"self": 0.6457822100010162,
"children": {
"run_training.setup": {
"total": 0.05269439299991063,
"count": 1,
"self": 0.05269439299991063
},
"TrainerController.start_learning": {
"total": 7210.504696964999,
"count": 1,
"self": 3.9923694331610022,
"children": {
"TrainerController._reset_env": {
"total": 3.0627412639998965,
"count": 1,
"self": 3.0627412639998965
},
"TrainerController.advance": {
"total": 7203.347211178837,
"count": 195173,
"self": 4.071902204720573,
"children": {
"env_step": {
"total": 5351.746946869085,
"count": 195173,
"self": 4979.025988699417,
"children": {
"SubprocessEnvManager._take_step": {
"total": 370.22445013274523,
"count": 195173,
"self": 13.690965204720214,
"children": {
"TorchPolicy.evaluate": {
"total": 356.533484928025,
"count": 187549,
"self": 356.533484928025
}
}
},
"workers": {
"total": 2.4965080369229327,
"count": 195173,
"self": 0.0,
"children": {
"worker_root": {
"total": 7197.187088753915,
"count": 195173,
"is_parallel": true,
"self": 2562.168583248931,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019861499999933585,
"count": 1,
"is_parallel": true,
"self": 0.0006163550003748242,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013697949996185343,
"count": 8,
"is_parallel": true,
"self": 0.0013697949996185343
}
}
},
"UnityEnvironment.step": {
"total": 0.09139408700002605,
"count": 1,
"is_parallel": true,
"self": 0.0006452439999975468,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045229000011204334,
"count": 1,
"is_parallel": true,
"self": 0.00045229000011204334
},
"communicator.exchange": {
"total": 0.08871684399991864,
"count": 1,
"is_parallel": true,
"self": 0.08871684399991864
},
"steps_from_proto": {
"total": 0.0015797089999978198,
"count": 1,
"is_parallel": true,
"self": 0.00032636000037200574,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001253348999625814,
"count": 8,
"is_parallel": true,
"self": 0.001253348999625814
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4635.018505504984,
"count": 195172,
"is_parallel": true,
"self": 102.40685103182113,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 71.16754488206175,
"count": 195172,
"is_parallel": true,
"self": 71.16754488206175
},
"communicator.exchange": {
"total": 4168.895824303004,
"count": 195172,
"is_parallel": true,
"self": 4168.895824303004
},
"steps_from_proto": {
"total": 292.54828528809776,
"count": 195172,
"is_parallel": true,
"self": 57.60247175311838,
"children": {
"_process_rank_one_or_two_observation": {
"total": 234.94581353497938,
"count": 1561376,
"is_parallel": true,
"self": 234.94581353497938
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1847.5283621050312,
"count": 195173,
"self": 8.09876412478252,
"children": {
"process_trajectory": {
"total": 377.81025067125415,
"count": 195173,
"self": 377.1727975502547,
"children": {
"RLTrainer._checkpoint": {
"total": 0.637453120999453,
"count": 6,
"self": 0.637453120999453
}
}
},
"_update_policy": {
"total": 1461.6193473089945,
"count": 1395,
"self": 870.7544198080006,
"children": {
"TorchPPOOptimizer.update": {
"total": 590.8649275009939,
"count": 68352,
"self": 590.8649275009939
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.184000211651437e-06,
"count": 1,
"self": 1.184000211651437e-06
},
"TrainerController._save_models": {
"total": 0.10237390500151378,
"count": 1,
"self": 0.0019402680027269525,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10043363699878682,
"count": 1,
"self": 0.10043363699878682
}
}
}
}
}
}
}
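
A minimal sketch of how one might inspect this file, assuming the JSON above is saved locally as run_logs/timers.json (the path ML-Agents typically uses for these training statistics; the exact location depends on the run). It loads the file, prints each gauge's final value with its min/max over the recorded summaries, and reports the total wall-clock time from the profiling tree:

import json

# Hypothetical local path; adjust to wherever this JSON was saved.
PATH = "run_logs/timers.json"

with open(PATH) as f:
    timers = json.load(f)

# "gauges" holds the per-metric summaries (value / min / max / count) shown above.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4g} "
          f"(min={gauge['min']:.4g}, max={gauge['max']:.4g}, n={gauge['count']})")

# The remaining top-level keys ("total", "self", "children") form the timer tree;
# "total" is the wall-clock time in seconds spent in each timed block.
print("Total training wall-clock time (s):", timers["total"])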