ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.22990769147872925,
"min": 0.22990769147872925,
"max": 0.9623711705207825,
"count": 30
},
"Pyramids.Policy.Entropy.sum": {
"value": 11484.3486328125,
"min": 11484.3486328125,
"max": 48380.32421875,
"count": 30
},
"Pyramids.Step.mean": {
"value": 1999978.0,
"min": 549877.0,
"max": 1999978.0,
"count": 30
},
"Pyramids.Step.sum": {
"value": 1999978.0,
"min": 549877.0,
"max": 1999978.0,
"count": 30
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.79498291015625,
"min": -0.049888525158166885,
"max": 0.8327198028564453,
"count": 30
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 404.64630126953125,
"min": -20.10507583618164,
"max": 423.02166748046875,
"count": 30
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.008816980756819248,
"min": -0.015706252306699753,
"max": 0.04321414232254028,
"count": 30
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.487843036651611,
"min": -7.664651393890381,
"max": 20.569931030273438,
"count": 30
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06890473043030297,
"min": 0.06715824836536068,
"max": 0.07270693400694214,
"count": 30
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.6537135303272712,
"min": 1.4417596207530268,
"max": 1.7449664161666114,
"count": 30
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013658327631775236,
"min": 0.002909782167301695,
"max": 0.015598461065061851,
"count": 30
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.3277998631626057,
"min": 0.06983477201524069,
"max": 0.36465456387811046,
"count": 30
},
"Pyramids.Policy.LearningRate.mean": {
"value": 3.761623746158331e-06,
"min": 3.761623746158331e-06,
"max": 0.0002208904688698525,
"count": 30
},
"Pyramids.Policy.LearningRate.sum": {
"value": 9.027896990779994e-05,
"min": 9.027896990779994e-05,
"max": 0.0051297780900742005,
"count": 30
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10125384166666666,
"min": 0.10125384166666666,
"max": 0.1736301475,
"count": 30
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.4300922,
"min": 2.4300922,
"max": 4.109925800000001,
"count": 30
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00013525878249999993,
"min": 0.00013525878249999993,
"max": 0.007365651735250001,
"count": 30
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0032462107799999984,
"min": 0.0032462107799999984,
"max": 0.17106158742,
"count": 30
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005682992283254862,
"min": 0.005682992283254862,
"max": 0.017659258097410202,
"count": 30
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13639181852340698,
"min": 0.13236719369888306,
"max": 0.40205714106559753,
"count": 30
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 235.55405405405406,
"min": 223.0137614678899,
"max": 913.68,
"count": 30
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 52293.0,
"min": 45684.0,
"max": 52293.0,
"count": 30
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7374116968665574,
"min": -0.41572597678060885,
"max": 1.7752282960365897,
"count": 30
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 385.70539670437574,
"min": -22.449202746152878,
"max": 388.77499683201313,
"count": 30
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7374116968665574,
"min": -0.41572597678060885,
"max": 1.7752282960365897,
"count": 30
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 385.70539670437574,
"min": -22.449202746152878,
"max": 388.77499683201313,
"count": 30
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.013987002048534205,
"min": 0.013545649350124707,
"max": 0.16782157377339899,
"count": 30
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1051144547745935,
"min": 2.8492067171282542,
"max": 8.41888378362637,
"count": 30
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690894269",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690897948"
},
"total": 3678.1012117030004,
"count": 1,
"self": 0.4758699430003617,
"children": {
"run_training.setup": {
"total": 0.03274945100019977,
"count": 1,
"self": 0.03274945100019977
},
"TrainerController.start_learning": {
"total": 3677.592592309,
"count": 1,
"self": 2.0773819288069717,
"children": {
"TrainerController._reset_env": {
"total": 4.084842718999425,
"count": 1,
"self": 4.084842718999425
},
"TrainerController.advance": {
"total": 3671.334356512193,
"count": 97701,
"self": 1.9837136676160299,
"children": {
"env_step": {
"total": 2673.681366750845,
"count": 97701,
"self": 2512.7749599728977,
"children": {
"SubprocessEnvManager._take_step": {
"total": 159.7130742850877,
"count": 97701,
"self": 6.983719284030485,
"children": {
"TorchPolicy.evaluate": {
"total": 152.72935500105723,
"count": 93816,
"self": 152.72935500105723
}
}
},
"workers": {
"total": 1.193332492859554,
"count": 97701,
"self": 0.0,
"children": {
"worker_root": {
"total": 3669.4285612269287,
"count": 97701,
"is_parallel": true,
"self": 1328.2800386228382,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017689090000203578,
"count": 1,
"is_parallel": true,
"self": 0.0005289179998726468,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001239991000147711,
"count": 8,
"is_parallel": true,
"self": 0.001239991000147711
}
}
},
"UnityEnvironment.step": {
"total": 0.04616322499987291,
"count": 1,
"is_parallel": true,
"self": 0.0006317520001175581,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004857240001001628,
"count": 1,
"is_parallel": true,
"self": 0.0004857240001001628
},
"communicator.exchange": {
"total": 0.043255069999759144,
"count": 1,
"is_parallel": true,
"self": 0.043255069999759144
},
"steps_from_proto": {
"total": 0.0017906789998960448,
"count": 1,
"is_parallel": true,
"self": 0.0003428509980949457,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014478280018010992,
"count": 8,
"is_parallel": true,
"self": 0.0014478280018010992
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2341.1485226040904,
"count": 97700,
"is_parallel": true,
"self": 51.45940181703827,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 34.14660361704682,
"count": 97700,
"is_parallel": true,
"self": 34.14660361704682
},
"communicator.exchange": {
"total": 2102.989779297909,
"count": 97700,
"is_parallel": true,
"self": 2102.989779297909
},
"steps_from_proto": {
"total": 152.55273787209626,
"count": 97700,
"is_parallel": true,
"self": 30.888071267690066,
"children": {
"_process_rank_one_or_two_observation": {
"total": 121.6646666044062,
"count": 781600,
"is_parallel": true,
"self": 121.6646666044062
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 995.6692760937322,
"count": 97701,
"self": 4.01680010486416,
"children": {
"process_trajectory": {
"total": 169.72121598989634,
"count": 97701,
"self": 169.31345781989603,
"children": {
"RLTrainer._checkpoint": {
"total": 0.40775817000030656,
"count": 3,
"self": 0.40775817000030656
}
}
},
"_update_policy": {
"total": 821.9312599989717,
"count": 707,
"self": 535.8821322959229,
"children": {
"TorchPPOOptimizer.update": {
"total": 286.0491277030487,
"count": 34197,
"self": 286.0491277030487
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.739989425521344e-07,
"count": 1,
"self": 8.739989425521344e-07
},
"TrainerController._save_models": {
"total": 0.09601027500139026,
"count": 1,
"self": 0.0018373540024185786,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09417292099897168,
"count": 1,
"self": 0.09417292099897168
}
}
}
}
}
}
}
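
Not part of the uploaded log itself, but as a reading aid: the short Python sketch below shows one way to load this timers.json and print both the training gauges and the hierarchical timer breakdown. The relative path run_logs/timers.json is an assumption based on the repository layout shown above; everything else uses only the standard library.

# Hedged example (not part of the original run logs): load this timers.json
# and summarise its contents. The path below is an assumption based on the
# repository layout (run_logs/timers.json); adjust it for your local copy.
import json

with open("run_logs/timers.json") as f:
    root = json.load(f)

# Each gauge stores the latest value plus the min/max seen across summaries.
for name, gauge in root["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# The timer block is a tree: every node records wall-clock seconds ("total"),
# a call count, self time, and nested children. Walk it depth-first.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s "
          f"over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(root)

Run against this file, the timer walk simply surfaces what is already recorded above: for example, the parallel worker tree reports roughly 2.1e3 s under communicator.exchange, i.e. time spent exchanging data with the Unity executable, out of about 3.68e3 s of total run time.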