{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4078574180603027,
"min": 1.4078574180603027,
"max": 1.4301613569259644,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70841.9765625,
"min": 67754.78125,
"max": 76122.1796875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 76.86635944700461,
"min": 67.8956043956044,
"max": 386.8914728682171,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 50040.0,
"min": 48912.0,
"max": 50076.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999966.0,
"min": 49889.0,
"max": 1999966.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999966.0,
"min": 49889.0,
"max": 1999966.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5293922424316406,
"min": 0.1025739461183548,
"max": 2.575030565261841,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1646.6343994140625,
"min": 13.129465103149414,
"max": 1806.618408203125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.966564084619238,
"min": 1.803706705570221,
"max": 4.125192184086781,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2582.233219087124,
"min": 230.87445831298828,
"max": 2886.1612099409103,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.966564084619238,
"min": 1.803706705570221,
"max": 4.125192184086781,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2582.233219087124,
"min": 230.87445831298828,
"max": 2886.1612099409103,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01702461286800422,
"min": 0.014041434816075102,
"max": 0.01965671237752152,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051073838604012656,
"min": 0.028082869632150204,
"max": 0.05879692649347514,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05914722937676642,
"min": 0.022289731353521348,
"max": 0.0620806416703595,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17744168813029926,
"min": 0.044579462707042695,
"max": 0.1862419250110785,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7118987627333314e-06,
"min": 3.7118987627333314e-06,
"max": 0.00029528235157255005,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1135696288199994e-05,
"min": 1.1135696288199994e-05,
"max": 0.00084384496871835,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1012372666666667,
"min": 0.1012372666666667,
"max": 0.19842745,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3037118000000001,
"min": 0.20767225,
"max": 0.5812816500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.173960666666662e-05,
"min": 7.173960666666662e-05,
"max": 0.004921529755000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021521881999999988,
"min": 0.00021521881999999988,
"max": 0.014065954335000005,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675623992",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675626123"
},
"total": 2130.8487134619995,
"count": 1,
"self": 0.3888835949996974,
"children": {
"run_training.setup": {
"total": 0.0977784089996021,
"count": 1,
"self": 0.0977784089996021
},
"TrainerController.start_learning": {
"total": 2130.362051458,
"count": 1,
"self": 3.55709791809295,
"children": {
"TrainerController._reset_env": {
"total": 10.585762834999969,
"count": 1,
"self": 10.585762834999969
},
"TrainerController.advance": {
"total": 2116.0979768929074,
"count": 233743,
"self": 3.8704610150221015,
"children": {
"env_step": {
"total": 1666.2589745950581,
"count": 233743,
"self": 1402.279387852012,
"children": {
"SubprocessEnvManager._take_step": {
"total": 261.50899192898123,
"count": 233743,
"self": 13.847264040148275,
"children": {
"TorchPolicy.evaluate": {
"total": 247.66172788883296,
"count": 222986,
"self": 62.70197790679185,
"children": {
"TorchPolicy.sample_actions": {
"total": 184.9597499820411,
"count": 222986,
"self": 184.9597499820411
}
}
}
}
},
"workers": {
"total": 2.4705948140649525,
"count": 233743,
"self": 0.0,
"children": {
"worker_root": {
"total": 2122.677155303872,
"count": 233743,
"is_parallel": true,
"self": 965.869661055754,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017267650000576396,
"count": 1,
"is_parallel": true,
"self": 0.0003053030004593893,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014214619995982503,
"count": 2,
"is_parallel": true,
"self": 0.0014214619995982503
}
}
},
"UnityEnvironment.step": {
"total": 0.02605721200006883,
"count": 1,
"is_parallel": true,
"self": 0.0002657220006767602,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001788019999366952,
"count": 1,
"is_parallel": true,
"self": 0.0001788019999366952
},
"communicator.exchange": {
"total": 0.02492233699967983,
"count": 1,
"is_parallel": true,
"self": 0.02492233699967983
},
"steps_from_proto": {
"total": 0.0006903509997755464,
"count": 1,
"is_parallel": true,
"self": 0.00022750099924451206,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004628500005310343,
"count": 2,
"is_parallel": true,
"self": 0.0004628500005310343
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1156.807494248118,
"count": 233742,
"is_parallel": true,
"self": 33.395847058127856,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 73.39666670309725,
"count": 233742,
"is_parallel": true,
"self": 73.39666670309725
},
"communicator.exchange": {
"total": 957.3761743678697,
"count": 233742,
"is_parallel": true,
"self": 957.3761743678697
},
"steps_from_proto": {
"total": 92.63880611902323,
"count": 233742,
"is_parallel": true,
"self": 36.3077713541511,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.33103476487213,
"count": 467484,
"is_parallel": true,
"self": 56.33103476487213
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 445.9685412828271,
"count": 233743,
"self": 5.600560269986545,
"children": {
"process_trajectory": {
"total": 144.95955204484108,
"count": 233743,
"self": 143.86557250484202,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0939795399990544,
"count": 10,
"self": 1.0939795399990544
}
}
},
"_update_policy": {
"total": 295.4084289679995,
"count": 97,
"self": 243.56585523199692,
"children": {
"TorchPPOOptimizer.update": {
"total": 51.84257373600258,
"count": 2910,
"self": 51.84257373600258
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.910001270123757e-07,
"count": 1,
"self": 8.910001270123757e-07
},
"TrainerController._save_models": {
"total": 0.12121292099982384,
"count": 1,
"self": 0.001974349999727565,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11923857100009627,
"count": 1,
"self": 0.11923857100009627
}
}
}
}
}
}
}