ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4041492938995361,
"min": 1.4041492938995361,
"max": 1.4305919408798218,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71671.9921875,
"min": 68586.9296875,
"max": 75685.59375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 81.30756578947368,
"min": 81.30756578947368,
"max": 380.8939393939394,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49435.0,
"min": 48834.0,
"max": 50392.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999967.0,
"min": 49732.0,
"max": 1999967.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999967.0,
"min": 49732.0,
"max": 1999967.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3638744354248047,
"min": 0.051388323307037354,
"max": 2.479180097579956,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1437.2357177734375,
"min": 6.731870174407959,
"max": 1437.2357177734375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6232884599778212,
"min": 1.786965586074436,
"max": 3.9041516516433408,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2202.9593836665154,
"min": 234.0924917757511,
"max": 2213.320209443569,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6232884599778212,
"min": 1.786965586074436,
"max": 3.9041516516433408,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2202.9593836665154,
"min": 234.0924917757511,
"max": 2213.320209443569,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01695369745672603,
"min": 0.01296923612729491,
"max": 0.019780073761648965,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03390739491345206,
"min": 0.02593847225458982,
"max": 0.05645858957626236,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.054878730947772666,
"min": 0.021723475183049838,
"max": 0.05862364477167527,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.10975746189554533,
"min": 0.043446950366099676,
"max": 0.17057112840314706,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.595423468225005e-06,
"min": 4.595423468225005e-06,
"max": 0.00029532262655912505,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.19084693645001e-06,
"min": 9.19084693645001e-06,
"max": 0.0008438790187070001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101531775,
"min": 0.101531775,
"max": 0.19844087500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20306355,
"min": 0.20306355,
"max": 0.581293,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.64355725000001e-05,
"min": 8.64355725000001e-05,
"max": 0.0049221996625,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001728711450000002,
"min": 0.0001728711450000002,
"max": 0.0140665207,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679387157",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679389997"
},
"total": 2840.289176532,
"count": 1,
"self": 0.49552389500013305,
"children": {
"run_training.setup": {
"total": 0.20451215399998546,
"count": 1,
"self": 0.20451215399998546
},
"TrainerController.start_learning": {
"total": 2839.589140483,
"count": 1,
"self": 5.324041436065272,
"children": {
"TrainerController._reset_env": {
"total": 6.479925356999956,
"count": 1,
"self": 6.479925356999956
},
"TrainerController.advance": {
"total": 2827.6155726149345,
"count": 231334,
"self": 5.6048219549261376,
"children": {
"env_step": {
"total": 2218.823672392991,
"count": 231334,
"self": 1853.1257047799368,
"children": {
"SubprocessEnvManager._take_step": {
"total": 362.2005617500215,
"count": 231334,
"self": 20.840407624103477,
"children": {
"TorchPolicy.evaluate": {
"total": 341.36015412591803,
"count": 222987,
"self": 341.36015412591803
}
}
},
"workers": {
"total": 3.4974058630326113,
"count": 231334,
"self": 0.0,
"children": {
"worker_root": {
"total": 2830.235969313048,
"count": 231334,
"is_parallel": true,
"self": 1326.9058976340436,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008736369999837734,
"count": 1,
"is_parallel": true,
"self": 0.0002928700000097706,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005807669999740028,
"count": 2,
"is_parallel": true,
"self": 0.0005807669999740028
}
}
},
"UnityEnvironment.step": {
"total": 0.035053382000000965,
"count": 1,
"is_parallel": true,
"self": 0.0003395399999703841,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023901000002979345,
"count": 1,
"is_parallel": true,
"self": 0.00023901000002979345
},
"communicator.exchange": {
"total": 0.033646470000007866,
"count": 1,
"is_parallel": true,
"self": 0.033646470000007866
},
"steps_from_proto": {
"total": 0.0008283619999929215,
"count": 1,
"is_parallel": true,
"self": 0.0002625590000207012,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005658029999722203,
"count": 2,
"is_parallel": true,
"self": 0.0005658029999722203
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1503.3300716790043,
"count": 231333,
"is_parallel": true,
"self": 42.80050439583715,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 93.477182235062,
"count": 231333,
"is_parallel": true,
"self": 93.477182235062
},
"communicator.exchange": {
"total": 1261.081463529102,
"count": 231333,
"is_parallel": true,
"self": 1261.081463529102
},
"steps_from_proto": {
"total": 105.97092151900318,
"count": 231333,
"is_parallel": true,
"self": 41.474153796929215,
"children": {
"_process_rank_one_or_two_observation": {
"total": 64.49676772207397,
"count": 462666,
"is_parallel": true,
"self": 64.49676772207397
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 603.1870782670176,
"count": 231334,
"self": 8.060361223030213,
"children": {
"process_trajectory": {
"total": 168.6381810149897,
"count": 231334,
"self": 166.7305266289897,
"children": {
"RLTrainer._checkpoint": {
"total": 1.9076543859999902,
"count": 10,
"self": 1.9076543859999902
}
}
},
"_update_policy": {
"total": 426.4885360289976,
"count": 96,
"self": 368.8617865970086,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.626749431989026,
"count": 2880,
"self": 57.626749431989026
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.008999788609799e-06,
"count": 1,
"self": 1.008999788609799e-06
},
"TrainerController._save_models": {
"total": 0.16960006600038469,
"count": 1,
"self": 0.0038875310006005748,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1657125349997841,
"count": 1,
"self": 0.1657125349997841
}
}
}
}
}
}
}