{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4100881814956665,
"min": 1.4100881814956665,
"max": 1.4312759637832642,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69083.0390625,
"min": 67911.125,
"max": 77900.703125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 90.38025594149909,
"min": 84.71698113207547,
"max": 389.7751937984496,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49438.0,
"min": 48883.0,
"max": 50281.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999319.0,
"min": 49660.0,
"max": 1999319.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999319.0,
"min": 49660.0,
"max": 1999319.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.398030996322632,
"min": 0.13617312908172607,
"max": 2.4799394607543945,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1311.722900390625,
"min": 17.430160522460938,
"max": 1393.77001953125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.743893513186976,
"min": 1.7695617359131575,
"max": 3.976925612571868,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2047.909751713276,
"min": 226.50390219688416,
"max": 2238.6927476525307,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.743893513186976,
"min": 1.7695617359131575,
"max": 3.976925612571868,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2047.909751713276,
"min": 226.50390219688416,
"max": 2238.6927476525307,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01650383372082918,
"min": 0.014606513604182206,
"max": 0.02024615408736281,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.049511501162487544,
"min": 0.02921302720836441,
"max": 0.05473531356741053,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.055553401758273446,
"min": 0.02152939795826872,
"max": 0.06394417852991158,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16666020527482034,
"min": 0.04305879591653744,
"max": 0.19183253558973473,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5220988260000033e-06,
"min": 3.5220988260000033e-06,
"max": 0.000295350976549675,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.056629647800001e-05,
"min": 1.056629647800001e-05,
"max": 0.0008441490186169999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10117400000000003,
"min": 0.10117400000000003,
"max": 0.198450325,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30352200000000007,
"min": 0.2074952,
"max": 0.581383,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.858260000000006e-05,
"min": 6.858260000000006e-05,
"max": 0.004922671217499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020574780000000016,
"min": 0.00020574780000000016,
"max": 0.0140710117,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694887500",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1694889979"
},
"total": 2478.738035515,
"count": 1,
"self": 0.7532877949997783,
"children": {
"run_training.setup": {
"total": 0.06722887099999753,
"count": 1,
"self": 0.06722887099999753
},
"TrainerController.start_learning": {
"total": 2477.917518849,
"count": 1,
"self": 4.527143901008003,
"children": {
"TrainerController._reset_env": {
"total": 5.339109407000024,
"count": 1,
"self": 5.339109407000024
},
"TrainerController.advance": {
"total": 2467.8547795639915,
"count": 232199,
"self": 4.628824688006716,
"children": {
"env_step": {
"total": 1900.809072613996,
"count": 232199,
"self": 1602.5566032120823,
"children": {
"SubprocessEnvManager._take_step": {
"total": 295.3791412860471,
"count": 232199,
"self": 17.29901099206154,
"children": {
"TorchPolicy.evaluate": {
"total": 278.0801302939856,
"count": 222872,
"self": 278.0801302939856
}
}
},
"workers": {
"total": 2.8733281158665704,
"count": 232199,
"self": 0.0,
"children": {
"worker_root": {
"total": 2470.089399877004,
"count": 232199,
"is_parallel": true,
"self": 1159.686952355045,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010562330000993825,
"count": 1,
"is_parallel": true,
"self": 0.0003002210003160144,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007560119997833681,
"count": 2,
"is_parallel": true,
"self": 0.0007560119997833681
}
}
},
"UnityEnvironment.step": {
"total": 0.028874622000103045,
"count": 1,
"is_parallel": true,
"self": 0.00028481800018198555,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002308600001015293,
"count": 1,
"is_parallel": true,
"self": 0.0002308600001015293
},
"communicator.exchange": {
"total": 0.027604676999999356,
"count": 1,
"is_parallel": true,
"self": 0.027604676999999356
},
"steps_from_proto": {
"total": 0.0007542669998201745,
"count": 1,
"is_parallel": true,
"self": 0.0002283199999055796,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005259469999145949,
"count": 2,
"is_parallel": true,
"self": 0.0005259469999145949
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1310.402447521959,
"count": 232198,
"is_parallel": true,
"self": 39.51858958517505,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.80297584390973,
"count": 232198,
"is_parallel": true,
"self": 81.80297584390973
},
"communicator.exchange": {
"total": 1089.4446266149257,
"count": 232198,
"is_parallel": true,
"self": 1089.4446266149257
},
"steps_from_proto": {
"total": 99.63625547794845,
"count": 232198,
"is_parallel": true,
"self": 35.45971605192494,
"children": {
"_process_rank_one_or_two_observation": {
"total": 64.17653942602351,
"count": 464396,
"is_parallel": true,
"self": 64.17653942602351
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 562.4168822619886,
"count": 232199,
"self": 6.730366939961414,
"children": {
"process_trajectory": {
"total": 140.5516389840261,
"count": 232199,
"self": 139.09423618702704,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4574027969990766,
"count": 10,
"self": 1.4574027969990766
}
}
},
"_update_policy": {
"total": 415.13487633800105,
"count": 97,
"self": 353.2212029799982,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.913673358002825,
"count": 2910,
"self": 61.913673358002825
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5609998627041932e-06,
"count": 1,
"self": 1.5609998627041932e-06
},
"TrainerController._save_models": {
"total": 0.19648441600020305,
"count": 1,
"self": 0.0030339680001816305,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19345044800002142,
"count": 1,
"self": 0.19345044800002142
}
}
}
}
}
}
}