Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4808414876461029,
"min": 0.45646536350250244,
"max": 1.4761430025100708,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14525.259765625,
"min": 13642.8369140625,
"max": 44780.2734375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989982.0,
"min": 29952.0,
"max": 989982.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989982.0,
"min": 29952.0,
"max": 989982.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.41117236018180847,
"min": -0.1260172724723816,
"max": 0.4495900273323059,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 108.54949951171875,
"min": -30.370162963867188,
"max": 121.83889770507812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.025955229997634888,
"min": 0.001035523833706975,
"max": 0.307271271944046,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.8521809577941895,
"min": 0.2692362070083618,
"max": 73.74510192871094,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06972378771923318,
"min": 0.06514819621779247,
"max": 0.07369894572346905,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9761330280692645,
"min": 0.48059853971798655,
"max": 1.0672664935021505,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010373268114048345,
"min": 0.0001120277094408344,
"max": 0.013814569378143335,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.14522575359667683,
"min": 0.001456360222730847,
"max": 0.20619688992398247,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.424154668171426e-06,
"min": 7.424154668171426e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010393816535439997,
"min": 0.00010393816535439997,
"max": 0.0031400270533243997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247468571428572,
"min": 0.10247468571428572,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346456,
"min": 1.3691136000000002,
"max": 2.3466756000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002572211028571428,
"min": 0.0002572211028571428,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036010954399999994,
"min": 0.0036010954399999994,
"max": 0.10469289243999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009374582208693027,
"min": 0.009374582208693027,
"max": 0.4946654140949249,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13124415278434753,
"min": 0.13124415278434753,
"max": 3.462657928466797,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 449.4153846153846,
"min": 388.9078947368421,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29212.0,
"min": 15984.0,
"max": 33324.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4274553592388446,
"min": -1.0000000521540642,
"max": 1.5058026062814813,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 92.7845983505249,
"min": -32.000001668930054,
"max": 114.44099807739258,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4274553592388446,
"min": -1.0000000521540642,
"max": 1.5058026062814813,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 92.7845983505249,
"min": -32.000001668930054,
"max": 114.44099807739258,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04350496525741899,
"min": 0.040064290778613405,
"max": 11.339918397367,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.827822741732234,
"min": 2.827822741732234,
"max": 181.438694357872,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690548181",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690550179"
},
"total": 1997.1754200600003,
"count": 1,
"self": 0.376533362000373,
"children": {
"run_training.setup": {
"total": 0.03642111199997089,
"count": 1,
"self": 0.03642111199997089
},
"TrainerController.start_learning": {
"total": 1996.762465586,
"count": 1,
"self": 1.8417457710179406,
"children": {
"TrainerController._reset_env": {
"total": 4.610064933000103,
"count": 1,
"self": 4.610064933000103
},
"TrainerController.advance": {
"total": 1990.206255852982,
"count": 63469,
"self": 1.7728782128874627,
"children": {
"env_step": {
"total": 1337.9766422440039,
"count": 63469,
"self": 1207.4637519479638,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.4123704990352,
"count": 63469,
"self": 5.3272204789532225,
"children": {
"TorchPolicy.evaluate": {
"total": 124.08515002008198,
"count": 62559,
"self": 124.08515002008198
}
}
},
"workers": {
"total": 1.1005197970048357,
"count": 63469,
"self": 0.0,
"children": {
"worker_root": {
"total": 1993.7353670329908,
"count": 63469,
"is_parallel": true,
"self": 904.2853400540353,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021563249997598177,
"count": 1,
"is_parallel": true,
"self": 0.0006389270001818659,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015173979995779519,
"count": 8,
"is_parallel": true,
"self": 0.0015173979995779519
}
}
},
"UnityEnvironment.step": {
"total": 0.052196651000031125,
"count": 1,
"is_parallel": true,
"self": 0.0006926920000296377,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005477709996739577,
"count": 1,
"is_parallel": true,
"self": 0.0005477709996739577
},
"communicator.exchange": {
"total": 0.04886110600000393,
"count": 1,
"is_parallel": true,
"self": 0.04886110600000393
},
"steps_from_proto": {
"total": 0.0020950820003236004,
"count": 1,
"is_parallel": true,
"self": 0.00041708700018716627,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001677995000136434,
"count": 8,
"is_parallel": true,
"self": 0.001677995000136434
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1089.4500269789555,
"count": 63468,
"is_parallel": true,
"self": 28.919261706989346,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 19.919393866924565,
"count": 63468,
"is_parallel": true,
"self": 19.919393866924565
},
"communicator.exchange": {
"total": 958.6525405869716,
"count": 63468,
"is_parallel": true,
"self": 958.6525405869716
},
"steps_from_proto": {
"total": 81.95883081807006,
"count": 63468,
"is_parallel": true,
"self": 17.70434792986498,
"children": {
"_process_rank_one_or_two_observation": {
"total": 64.25448288820508,
"count": 507744,
"is_parallel": true,
"self": 64.25448288820508
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 650.4567353960906,
"count": 63469,
"self": 3.028257084075449,
"children": {
"process_trajectory": {
"total": 108.0495058720162,
"count": 63469,
"self": 107.81499983501635,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2345060369998464,
"count": 2,
"self": 0.2345060369998464
}
}
},
"_update_policy": {
"total": 539.378972439999,
"count": 435,
"self": 340.66695357000526,
"children": {
"TorchPPOOptimizer.update": {
"total": 198.7120188699937,
"count": 22875,
"self": 198.7120188699937
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.920004231389612e-07,
"count": 1,
"self": 9.920004231389612e-07
},
"TrainerController._save_models": {
"total": 0.10439803699955519,
"count": 1,
"self": 0.0014601409993701964,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10293789600018499,
"count": 1,
"self": 0.10293789600018499
}
}
}
}
}
}
}