{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3970693349838257,
"min": 1.3970624208450317,
"max": 1.4273605346679688,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71103.84375,
"min": 68994.4453125,
"max": 77544.4765625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 76.88646967340591,
"min": 71.6759941089838,
"max": 404.35483870967744,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49438.0,
"min": 48668.0,
"max": 50140.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999981.0,
"min": 49921.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999981.0,
"min": 49921.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.529876708984375,
"min": 0.049263179302215576,
"max": 2.5418813228607178,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1626.710693359375,
"min": 6.059370994567871,
"max": 1676.0794677734375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9007270669862977,
"min": 1.860346065546439,
"max": 4.009829531783707,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2508.1675040721893,
"min": 228.822566062212,
"max": 2614.4088547229767,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9007270669862977,
"min": 1.860346065546439,
"max": 4.009829531783707,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2508.1675040721893,
"min": 228.822566062212,
"max": 2614.4088547229767,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01577033059729729,
"min": 0.013188009662553669,
"max": 0.018836015780478495,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04731099179189187,
"min": 0.026376019325107337,
"max": 0.05631129989924375,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06135362357729011,
"min": 0.021839615671585003,
"max": 0.0659477765361468,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18406087073187033,
"min": 0.043679231343170005,
"max": 0.1978433296084404,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.065248644949995e-06,
"min": 4.065248644949995e-06,
"max": 0.00029536372654542504,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.2195745934849985e-05,
"min": 1.2195745934849985e-05,
"max": 0.0008444893685035497,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10135505,
"min": 0.10135505,
"max": 0.198454575,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30406515,
"min": 0.20789585,
"max": 0.58149645,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.761699499999994e-05,
"min": 7.761699499999994e-05,
"max": 0.004922883292499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002328509849999998,
"min": 0.0002328509849999998,
"max": 0.014076672855,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672849693",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672852000"
},
"total": 2307.043704233,
"count": 1,
"self": 0.3906339599993771,
"children": {
"run_training.setup": {
"total": 0.10615941000003204,
"count": 1,
"self": 0.10615941000003204
},
"TrainerController.start_learning": {
"total": 2306.5469108630004,
"count": 1,
"self": 3.8462015959412383,
"children": {
"TrainerController._reset_env": {
"total": 7.386240528999963,
"count": 1,
"self": 7.386240528999963
},
"TrainerController.advance": {
"total": 2295.189979783059,
"count": 233429,
"self": 4.07074854703842,
"children": {
"env_step": {
"total": 1821.2316896339328,
"count": 233429,
"self": 1531.4703649138924,
"children": {
"SubprocessEnvManager._take_step": {
"total": 287.06674520006703,
"count": 233429,
"self": 14.762443229009023,
"children": {
"TorchPolicy.evaluate": {
"total": 272.304301971058,
"count": 223028,
"self": 67.77070203305772,
"children": {
"TorchPolicy.sample_actions": {
"total": 204.5335999380003,
"count": 223028,
"self": 204.5335999380003
}
}
}
}
},
"workers": {
"total": 2.6945795199732743,
"count": 233429,
"self": 0.0,
"children": {
"worker_root": {
"total": 2298.4269883060324,
"count": 233429,
"is_parallel": true,
"self": 1037.2276204180212,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0029385060000208796,
"count": 1,
"is_parallel": true,
"self": 0.00041227300005175493,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025262329999691246,
"count": 2,
"is_parallel": true,
"self": 0.0025262329999691246
}
}
},
"UnityEnvironment.step": {
"total": 0.028739409999957388,
"count": 1,
"is_parallel": true,
"self": 0.00029130399997256973,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002044240000032005,
"count": 1,
"is_parallel": true,
"self": 0.0002044240000032005
},
"communicator.exchange": {
"total": 0.027510754999980236,
"count": 1,
"is_parallel": true,
"self": 0.027510754999980236
},
"steps_from_proto": {
"total": 0.0007329270000013821,
"count": 1,
"is_parallel": true,
"self": 0.00024702600001091923,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004859009999904629,
"count": 2,
"is_parallel": true,
"self": 0.0004859009999904629
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1261.1993678880112,
"count": 233428,
"is_parallel": true,
"self": 35.62272442982021,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.96604175502125,
"count": 233428,
"is_parallel": true,
"self": 81.96604175502125
},
"communicator.exchange": {
"total": 1045.612217896073,
"count": 233428,
"is_parallel": true,
"self": 1045.612217896073
},
"steps_from_proto": {
"total": 97.99838380709679,
"count": 233428,
"is_parallel": true,
"self": 42.56883378702514,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.429550020071645,
"count": 466856,
"is_parallel": true,
"self": 55.429550020071645
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 469.8875416020875,
"count": 233429,
"self": 6.0881078401608875,
"children": {
"process_trajectory": {
"total": 160.16291442692727,
"count": 233429,
"self": 158.81456255392754,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3483518729997286,
"count": 10,
"self": 1.3483518729997286
}
}
},
"_update_policy": {
"total": 303.6365193349993,
"count": 97,
"self": 251.33814743099617,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.29837190400315,
"count": 2910,
"self": 52.29837190400315
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.560000424040481e-07,
"count": 1,
"self": 8.560000424040481e-07
},
"TrainerController._save_models": {
"total": 0.12448809900024571,
"count": 1,
"self": 0.0020294640003157838,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12245863499992993,
"count": 1,
"self": 0.12245863499992993
}
}
}
}
}
}
}