{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4017479419708252,
"min": 1.4017479419708252,
"max": 1.4275944232940674,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69694.90625,
"min": 68987.203125,
"max": 77277.765625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 90.07468123861567,
"min": 77.03582554517133,
"max": 404.504,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49451.0,
"min": 48838.0,
"max": 50563.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999993.0,
"min": 49939.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999993.0,
"min": 49939.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4940617084503174,
"min": 0.12479797005653381,
"max": 2.5124666690826416,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1369.2398681640625,
"min": 15.474947929382324,
"max": 1567.46435546875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9200159112612405,
"min": 1.846497491963448,
"max": 4.0093982005907485,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2152.088735282421,
"min": 228.96568900346756,
"max": 2444.245959341526,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9200159112612405,
"min": 1.846497491963448,
"max": 4.0093982005907485,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2152.088735282421,
"min": 228.96568900346756,
"max": 2444.245959341526,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01661093717088483,
"min": 0.012892281195672695,
"max": 0.01921457482724994,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.049832811512654486,
"min": 0.02578456239134539,
"max": 0.057103491279121954,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05346560031175613,
"min": 0.021649660139034195,
"max": 0.05793598648160696,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1603968009352684,
"min": 0.04329932027806839,
"max": 0.16896129970749219,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7870487376833442e-06,
"min": 3.7870487376833442e-06,
"max": 0.000295311001563,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1361146213050033e-05,
"min": 1.1361146213050033e-05,
"max": 0.0008442919685693499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10126231666666669,
"min": 0.10126231666666669,
"max": 0.19843699999999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30378695000000006,
"min": 0.20765824999999993,
"max": 0.5814306499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.298960166666686e-05,
"min": 7.298960166666686e-05,
"max": 0.004922006299999998,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021896880500000057,
"min": 0.00021896880500000057,
"max": 0.014073389435,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693918616",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693920998"
},
"total": 2381.487785915,
"count": 1,
"self": 0.43837822600016807,
"children": {
"run_training.setup": {
"total": 0.043711927000003925,
"count": 1,
"self": 0.043711927000003925
},
"TrainerController.start_learning": {
"total": 2381.005695762,
"count": 1,
"self": 4.153790612992452,
"children": {
"TrainerController._reset_env": {
"total": 4.100730655000007,
"count": 1,
"self": 4.100730655000007
},
"TrainerController.advance": {
"total": 2372.627213040008,
"count": 232554,
"self": 4.475146599986601,
"children": {
"env_step": {
"total": 1820.4426852520699,
"count": 232554,
"self": 1534.7850024662052,
"children": {
"SubprocessEnvManager._take_step": {
"total": 282.85087269294223,
"count": 232554,
"self": 15.86794408987987,
"children": {
"TorchPolicy.evaluate": {
"total": 266.98292860306236,
"count": 222950,
"self": 266.98292860306236
}
}
},
"workers": {
"total": 2.806810092922433,
"count": 232554,
"self": 0.0,
"children": {
"worker_root": {
"total": 2373.6881042318314,
"count": 232554,
"is_parallel": true,
"self": 1120.301155658824,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008983339999986129,
"count": 1,
"is_parallel": true,
"self": 0.00021602699996492447,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006823070000336884,
"count": 2,
"is_parallel": true,
"self": 0.0006823070000336884
}
}
},
"UnityEnvironment.step": {
"total": 0.02836897399998861,
"count": 1,
"is_parallel": true,
"self": 0.0003345080000372036,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021261299997377137,
"count": 1,
"is_parallel": true,
"self": 0.00021261299997377137
},
"communicator.exchange": {
"total": 0.02708822599998939,
"count": 1,
"is_parallel": true,
"self": 0.02708822599998939
},
"steps_from_proto": {
"total": 0.0007336269999882461,
"count": 1,
"is_parallel": true,
"self": 0.00019752599996536446,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005361010000228816,
"count": 2,
"is_parallel": true,
"self": 0.0005361010000228816
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1253.3869485730074,
"count": 232553,
"is_parallel": true,
"self": 39.432613244897084,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.83013374097567,
"count": 232553,
"is_parallel": true,
"self": 79.83013374097567
},
"communicator.exchange": {
"total": 1037.4760307760516,
"count": 232553,
"is_parallel": true,
"self": 1037.4760307760516
},
"steps_from_proto": {
"total": 96.64817081108305,
"count": 232553,
"is_parallel": true,
"self": 34.100947352204514,
"children": {
"_process_rank_one_or_two_observation": {
"total": 62.547223458878534,
"count": 465106,
"is_parallel": true,
"self": 62.547223458878534
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 547.7093811879517,
"count": 232554,
"self": 6.451775843874657,
"children": {
"process_trajectory": {
"total": 137.95586376607565,
"count": 232554,
"self": 136.4886579830756,
"children": {
"RLTrainer._checkpoint": {
"total": 1.467205783000054,
"count": 10,
"self": 1.467205783000054
}
}
},
"_update_policy": {
"total": 403.3017415780015,
"count": 97,
"self": 342.7747839860043,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.52695759199719,
"count": 2910,
"self": 60.52695759199719
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3389999367063865e-06,
"count": 1,
"self": 1.3389999367063865e-06
},
"TrainerController._save_models": {
"total": 0.12396011499959059,
"count": 1,
"self": 0.0020599819995368307,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12190013300005376,
"count": 1,
"self": 0.12190013300005376
}
}
}
}
}
}
}