ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.1734270453453064,
"min": 0.1626211553812027,
"max": 1.4498789310455322,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 5169.51318359375,
"min": 4889.2529296875,
"max": 43983.52734375,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999902.0,
"min": 29952.0,
"max": 2999902.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999902.0,
"min": 29952.0,
"max": 2999902.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.754357635974884,
"min": -0.1312602460384369,
"max": 0.8611552119255066,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 215.74627685546875,
"min": -31.63372039794922,
"max": 254.04078674316406,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0033349106088280678,
"min": -0.014645138755440712,
"max": 0.4561350345611572,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.9537844061851501,
"min": -4.085993766784668,
"max": 108.10400390625,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06716880165151365,
"min": 0.06485454586800188,
"max": 0.07454089974219513,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9403632231211911,
"min": 0.5034031208518266,
"max": 1.1092770241472558,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014674293200230403,
"min": 0.00012045676588491576,
"max": 0.017596050165145247,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20544010480322564,
"min": 0.0013250244247340734,
"max": 0.24634470231203345,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5148352093738128e-06,
"min": 1.5148352093738128e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.120769293123338e-05,
"min": 2.120769293123338e-05,
"max": 0.003842999019000367,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10050491190476189,
"min": 0.10050491190476189,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4070687666666666,
"min": 1.3962282666666668,
"max": 2.7526058666666673,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.0440699285714394e-05,
"min": 6.0440699285714394e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008461697900000015,
"min": 0.0008461697900000015,
"max": 0.12811186337,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006210089195519686,
"min": 0.00600480055436492,
"max": 0.4624593257904053,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08694124966859818,
"min": 0.0840672105550766,
"max": 3.237215280532837,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 249.6206896551724,
"min": 215.81617647058823,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28956.0,
"min": 15984.0,
"max": 32956.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7331344703661984,
"min": -1.0000000521540642,
"max": 1.7848393979081272,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 201.04359856247902,
"min": -31.998401656746864,
"max": 244.52299751341343,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7331344703661984,
"min": -1.0000000521540642,
"max": 1.7848393979081272,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 201.04359856247902,
"min": -31.998401656746864,
"max": 244.52299751341343,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.016095047999097632,
"min": 0.01404887657708371,
"max": 9.761986959725618,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.8670255678953254,
"min": 1.8072425858408678,
"max": 156.1917913556099,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690354683",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690362385"
},
"total": 7702.065467801,
"count": 1,
"self": 0.5471468510004343,
"children": {
"run_training.setup": {
"total": 0.03324788100007936,
"count": 1,
"self": 0.03324788100007936
},
"TrainerController.start_learning": {
"total": 7701.485073069,
"count": 1,
"self": 4.7972480500566235,
"children": {
"TrainerController._reset_env": {
"total": 4.037148493999894,
"count": 1,
"self": 4.037148493999894
},
"TrainerController.advance": {
"total": 7692.552408370942,
"count": 194379,
"self": 4.94181317370203,
"children": {
"env_step": {
"total": 5680.920608533129,
"count": 194379,
"self": 5326.844493499253,
"children": {
"SubprocessEnvManager._take_step": {
"total": 351.15077393709817,
"count": 194379,
"self": 15.398563165106907,
"children": {
"TorchPolicy.evaluate": {
"total": 335.75221077199126,
"count": 187544,
"self": 335.75221077199126
}
}
},
"workers": {
"total": 2.9253410967780837,
"count": 194379,
"self": 0.0,
"children": {
"worker_root": {
"total": 7684.347814948146,
"count": 194379,
"is_parallel": true,
"self": 2736.9356454742547,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017692420001367282,
"count": 1,
"is_parallel": true,
"self": 0.0005972090000341268,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011720330001026014,
"count": 8,
"is_parallel": true,
"self": 0.0011720330001026014
}
}
},
"UnityEnvironment.step": {
"total": 0.061581990000149744,
"count": 1,
"is_parallel": true,
"self": 0.0006242239999210142,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005231170000570273,
"count": 1,
"is_parallel": true,
"self": 0.0005231170000570273
},
"communicator.exchange": {
"total": 0.05846220800003721,
"count": 1,
"is_parallel": true,
"self": 0.05846220800003721
},
"steps_from_proto": {
"total": 0.001972441000134495,
"count": 1,
"is_parallel": true,
"self": 0.00042134700015594717,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015510939999785478,
"count": 8,
"is_parallel": true,
"self": 0.0015510939999785478
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4947.4121694738915,
"count": 194378,
"is_parallel": true,
"self": 108.66524012065747,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.00519569890798,
"count": 194378,
"is_parallel": true,
"self": 75.00519569890798
},
"communicator.exchange": {
"total": 4424.297072127102,
"count": 194378,
"is_parallel": true,
"self": 4424.297072127102
},
"steps_from_proto": {
"total": 339.4446615272234,
"count": 194378,
"is_parallel": true,
"self": 71.08098363265844,
"children": {
"_process_rank_one_or_two_observation": {
"total": 268.36367789456494,
"count": 1555024,
"is_parallel": true,
"self": 268.36367789456494
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2006.6899866641118,
"count": 194379,
"self": 8.958932142950516,
"children": {
"process_trajectory": {
"total": 363.22435207617036,
"count": 194379,
"self": 362.4030286361708,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8213234399995599,
"count": 6,
"self": 0.8213234399995599
}
}
},
"_update_policy": {
"total": 1634.5067024449909,
"count": 1388,
"self": 1069.7402425051293,
"children": {
"TorchPPOOptimizer.update": {
"total": 564.7664599398615,
"count": 68394,
"self": 564.7664599398615
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0170006135012954e-06,
"count": 1,
"self": 1.0170006135012954e-06
},
"TrainerController._save_models": {
"total": 0.09826713700022083,
"count": 1,
"self": 0.0014133509994280757,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09685378600079275,
"count": 1,
"self": 0.09685378600079275
}
}
}
}
}
}
}