{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5399750471115112,
"min": 0.5399750471115112,
"max": 1.4519519805908203,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16251.0888671875,
"min": 16251.0888671875,
"max": 44046.4140625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989945.0,
"min": 29872.0,
"max": 989945.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989945.0,
"min": 29872.0,
"max": 989945.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3227290213108063,
"min": -0.09820189327001572,
"max": 0.3537415564060211,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 83.9095458984375,
"min": -23.666656494140625,
"max": 92.68029022216797,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.4602455794811249,
"min": -0.03164992853999138,
"max": 0.4602455794811249,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 119.66384887695312,
"min": -8.260631561279297,
"max": 119.66384887695312,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06884274974748092,
"min": 0.06425404305401673,
"max": 0.07244754452821675,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9637984964647329,
"min": 0.5768311019954242,
"max": 1.042963239150898,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0557487743714037,
"min": 0.0005109460279241244,
"max": 0.0557487743714037,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.7804828411996518,
"min": 0.006642298363013617,
"max": 0.7804828411996518,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.319261845992866e-06,
"min": 7.319261845992866e-06,
"max": 0.0002947644017452,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010246966584390011,
"min": 0.00010246966584390011,
"max": 0.0033729199756934,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243972142857141,
"min": 0.10243972142857141,
"max": 0.1982548,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4341560999999998,
"min": 1.4341560999999998,
"max": 2.4851554,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025372817071428596,
"min": 0.00025372817071428596,
"max": 0.009825654520000001,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035521943900000035,
"min": 0.0035521943900000035,
"max": 0.11244822934000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010192131623625755,
"min": 0.010192131623625755,
"max": 0.6322476863861084,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14268983900547028,
"min": 0.14268983900547028,
"max": 5.057981491088867,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 502.3898305084746,
"min": 466.55555555555554,
"max": 995.5806451612904,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29641.0,
"min": 17182.0,
"max": 32121.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2263321791917592,
"min": -0.925387146251817,
"max": 1.3783718453487381,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 72.35359857231379,
"min": -28.687001533806324,
"max": 88.21579810231924,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2263321791917592,
"min": -0.925387146251817,
"max": 1.3783718453487381,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 72.35359857231379,
"min": -28.687001533806324,
"max": 88.21579810231924,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0536589139443033,
"min": 0.0536589139443033,
"max": 13.030622682637638,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1658759227138944,
"min": 3.1658759227138944,
"max": 234.5512082874775,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691567840",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691569980"
},
"total": 2139.2600702560003,
"count": 1,
"self": 0.47706513700040887,
"children": {
"run_training.setup": {
"total": 0.039285389999804465,
"count": 1,
"self": 0.039285389999804465
},
"TrainerController.start_learning": {
"total": 2138.743719729,
"count": 1,
"self": 1.3533703351095028,
"children": {
"TrainerController._reset_env": {
"total": 4.853995445000237,
"count": 1,
"self": 4.853995445000237
},
"TrainerController.advance": {
"total": 2132.443922358891,
"count": 63487,
"self": 1.3950255468880641,
"children": {
"env_step": {
"total": 1463.652125551053,
"count": 63487,
"self": 1353.3037801051,
"children": {
"SubprocessEnvManager._take_step": {
"total": 109.53678360307913,
"count": 63487,
"self": 4.650957185085645,
"children": {
"TorchPolicy.evaluate": {
"total": 104.88582641799348,
"count": 62570,
"self": 104.88582641799348
}
}
},
"workers": {
"total": 0.8115618428737434,
"count": 63487,
"self": 0.0,
"children": {
"worker_root": {
"total": 2133.9334333939846,
"count": 63487,
"is_parallel": true,
"self": 894.3006750360692,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017576329996700224,
"count": 1,
"is_parallel": true,
"self": 0.0005511569988811971,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012064760007888253,
"count": 8,
"is_parallel": true,
"self": 0.0012064760007888253
}
}
},
"UnityEnvironment.step": {
"total": 0.08123048800007382,
"count": 1,
"is_parallel": true,
"self": 0.0006107010003688629,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048702499998398707,
"count": 1,
"is_parallel": true,
"self": 0.00048702499998398707
},
"communicator.exchange": {
"total": 0.07832975299970713,
"count": 1,
"is_parallel": true,
"self": 0.07832975299970713
},
"steps_from_proto": {
"total": 0.0018030090000138443,
"count": 1,
"is_parallel": true,
"self": 0.00035532799984139274,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014476810001724516,
"count": 8,
"is_parallel": true,
"self": 0.0014476810001724516
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1239.6327583579155,
"count": 63486,
"is_parallel": true,
"self": 34.29625882991786,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.2207602299859,
"count": 63486,
"is_parallel": true,
"self": 23.2207602299859
},
"communicator.exchange": {
"total": 1077.6690098899317,
"count": 63486,
"is_parallel": true,
"self": 1077.6690098899317
},
"steps_from_proto": {
"total": 104.44672940808005,
"count": 63486,
"is_parallel": true,
"self": 20.131587631167804,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.31514177691224,
"count": 507888,
"is_parallel": true,
"self": 84.31514177691224
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 667.3967712609501,
"count": 63487,
"self": 2.476039558957382,
"children": {
"process_trajectory": {
"total": 110.98110709299681,
"count": 63487,
"self": 110.77432993299726,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2067771599995467,
"count": 2,
"self": 0.2067771599995467
}
}
},
"_update_policy": {
"total": 553.9396246089959,
"count": 454,
"self": 361.3766866469764,
"children": {
"TorchPPOOptimizer.update": {
"total": 192.56293796201953,
"count": 22785,
"self": 192.56293796201953
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.259994501713663e-07,
"count": 1,
"self": 8.259994501713663e-07
},
"TrainerController._save_models": {
"total": 0.09243076399980055,
"count": 1,
"self": 0.0013125659997967887,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09111819800000376,
"count": 1,
"self": 0.09111819800000376
}
}
}
}
}
}
}