ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4070724248886108,
"min": 1.4070724248886108,
"max": 1.4291751384735107,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70255.125,
"min": 67283.3359375,
"max": 79915.546875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 106.95680345572354,
"min": 82.19134775374376,
"max": 424.0,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49521.0,
"min": 49165.0,
"max": 50120.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999973.0,
"min": 49432.0,
"max": 1999973.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999973.0,
"min": 49432.0,
"max": 1999973.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.315603494644165,
"min": 0.06453980505466461,
"max": 2.4812848567962646,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1072.1243896484375,
"min": 7.551156997680664,
"max": 1458.157958984375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.421364369969131,
"min": 2.0234359235335617,
"max": 3.9674112116437583,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1584.0917032957077,
"min": 236.74200305342674,
"max": 2384.414138197899,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.421364369969131,
"min": 2.0234359235335617,
"max": 3.9674112116437583,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1584.0917032957077,
"min": 236.74200305342674,
"max": 2384.414138197899,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018010613367530622,
"min": 0.011901670469122414,
"max": 0.020557434094371274,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05403184010259186,
"min": 0.02380334093824483,
"max": 0.059350711955145616,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.048276150516337824,
"min": 0.021620574469367663,
"max": 0.06343948977688948,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14482845154901347,
"min": 0.043241148938735326,
"max": 0.17967674769461156,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1028489657500085e-06,
"min": 3.1028489657500085e-06,
"max": 0.000295274476575175,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.308546897250026e-06,
"min": 9.308546897250026e-06,
"max": 0.0008437017187660998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10103425000000003,
"min": 0.10103425000000003,
"max": 0.19842482500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3031027500000001,
"min": 0.20724945,
"max": 0.5812339000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.160907500000017e-05,
"min": 6.160907500000017e-05,
"max": 0.0049213987675,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018482722500000052,
"min": 0.00018482722500000052,
"max": 0.01406357161,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694196803",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1694199319"
},
"total": 2515.441576524,
"count": 1,
"self": 0.39167299600012484,
"children": {
"run_training.setup": {
"total": 0.06301339400010875,
"count": 1,
"self": 0.06301339400010875
},
"TrainerController.start_learning": {
"total": 2514.9868901339996,
"count": 1,
"self": 4.545852993052904,
"children": {
"TrainerController._reset_env": {
"total": 4.915808806000086,
"count": 1,
"self": 4.915808806000086
},
"TrainerController.advance": {
"total": 2505.404180748947,
"count": 232419,
"self": 4.7343023882122,
"children": {
"env_step": {
"total": 1946.8714471907979,
"count": 232419,
"self": 1644.30081255692,
"children": {
"SubprocessEnvManager._take_step": {
"total": 299.560436999953,
"count": 232419,
"self": 17.16815577692796,
"children": {
"TorchPolicy.evaluate": {
"total": 282.39228122302507,
"count": 223080,
"self": 282.39228122302507
}
}
},
"workers": {
"total": 3.0101976339249177,
"count": 232419,
"self": 0.0,
"children": {
"worker_root": {
"total": 2507.3594481460536,
"count": 232419,
"is_parallel": true,
"self": 1164.4183402570711,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010965120000037132,
"count": 1,
"is_parallel": true,
"self": 0.00032571800011282903,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007707939998908842,
"count": 2,
"is_parallel": true,
"self": 0.0007707939998908842
}
}
},
"UnityEnvironment.step": {
"total": 0.03204788300013206,
"count": 1,
"is_parallel": true,
"self": 0.0003527460000896099,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002422600000500097,
"count": 1,
"is_parallel": true,
"self": 0.0002422600000500097
},
"communicator.exchange": {
"total": 0.030677745000048162,
"count": 1,
"is_parallel": true,
"self": 0.030677745000048162
},
"steps_from_proto": {
"total": 0.0007751319999442785,
"count": 1,
"is_parallel": true,
"self": 0.000225006999926336,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005501250000179425,
"count": 2,
"is_parallel": true,
"self": 0.0005501250000179425
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1342.9411078889825,
"count": 232418,
"is_parallel": true,
"self": 40.2749665729591,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 86.32293315411084,
"count": 232418,
"is_parallel": true,
"self": 86.32293315411084
},
"communicator.exchange": {
"total": 1114.7033896609496,
"count": 232418,
"is_parallel": true,
"self": 1114.7033896609496
},
"steps_from_proto": {
"total": 101.63981850096297,
"count": 232418,
"is_parallel": true,
"self": 38.762589105116604,
"children": {
"_process_rank_one_or_two_observation": {
"total": 62.87722939584637,
"count": 464836,
"is_parallel": true,
"self": 62.87722939584637
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 553.798431169937,
"count": 232419,
"self": 6.479552475758737,
"children": {
"process_trajectory": {
"total": 142.23268056517782,
"count": 232419,
"self": 140.64142143917752,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5912591260002955,
"count": 10,
"self": 1.5912591260002955
}
}
},
"_update_policy": {
"total": 405.0861981290004,
"count": 97,
"self": 345.43451777500127,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.651680353999154,
"count": 2910,
"self": 59.651680353999154
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.165999947261298e-06,
"count": 1,
"self": 1.165999947261298e-06
},
"TrainerController._save_models": {
"total": 0.12104641999985688,
"count": 1,
"self": 0.0024513489997843863,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1185950710000725,
"count": 1,
"self": 0.1185950710000725
}
}
}
}
}
}
}
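
The snippet below is not part of the timers.json payload above; it is a minimal sketch, assuming the file is saved locally at run_logs/timers.json, of how the gauge summaries and the root timer total could be loaded with Python's standard json module for a quick look at the run.

import json

# Load the ML-Agents timer log (path is an assumption; adjust to your run directory).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the latest value plus its running min, max, and count.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# Total wall-clock seconds recorded by the root timer.
print("total seconds:", timers["total"])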