{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4268600940704346,
"min": 1.4192981719970703,
"max": 1.4281470775604248,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71204.6015625,
"min": 69546.4375,
"max": 77273.6875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.68070175438596,
"min": 79.73709677419355,
"max": 393.890625,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49408.0,
"min": 49313.0,
"max": 50418.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999903.0,
"min": 49837.0,
"max": 1999903.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999903.0,
"min": 49837.0,
"max": 1999903.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.416405200958252,
"min": 0.10270270705223083,
"max": 2.4499049186706543,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1377.3509521484375,
"min": 13.043243408203125,
"max": 1468.356689453125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.76388264538949,
"min": 1.8354441755869257,
"max": 3.889473467922667,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2145.4131078720093,
"min": 233.10141029953957,
"max": 2319.6008169054985,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.76388264538949,
"min": 1.8354441755869257,
"max": 3.889473467922667,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2145.4131078720093,
"min": 233.10141029953957,
"max": 2319.6008169054985,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01695113264733866,
"min": 0.013735476405903075,
"max": 0.021620940843907494,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.050853397942015974,
"min": 0.02747095281180615,
"max": 0.058254778739016425,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0796708437303702,
"min": 0.02217086549434397,
"max": 0.08398792259395123,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.2390125311911106,
"min": 0.04572788986066977,
"max": 0.2390125311911106,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 1.1757154910499976e-06,
"min": 1.1757154910499976e-06,
"max": 9.843932656067501e-05,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 3.5271464731499926e-06,
"min": 3.5271464731499926e-06,
"max": 0.00028134871865129993,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1011756166666667,
"min": 0.1011756166666667,
"max": 0.19843932500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035268500000001,
"min": 0.20749585,
"max": 0.5813487000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.866327166666653e-05,
"min": 6.866327166666653e-05,
"max": 0.0049221223174999995,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002059898149999996,
"min": 0.0002059898149999996,
"max": 0.01406930013,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670617976",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy-Doggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670620361"
},
"total": 2385.1356751060002,
"count": 1,
"self": 0.4462831650002954,
"children": {
"run_training.setup": {
"total": 0.1230319649998819,
"count": 1,
"self": 0.1230319649998819
},
"TrainerController.start_learning": {
"total": 2384.566359976,
"count": 1,
"self": 4.009350491061468,
"children": {
"TrainerController._reset_env": {
"total": 11.347829014999888,
"count": 1,
"self": 11.347829014999888
},
"TrainerController.advance": {
"total": 2369.084417255939,
"count": 232057,
"self": 4.367539872785528,
"children": {
"env_step": {
"total": 1863.0907953130925,
"count": 232057,
"self": 1571.0134818919908,
"children": {
"SubprocessEnvManager._take_step": {
"total": 289.3058330670767,
"count": 232057,
"self": 15.07533838283598,
"children": {
"TorchPolicy.evaluate": {
"total": 274.23049468424074,
"count": 222863,
"self": 69.10152142620518,
"children": {
"TorchPolicy.sample_actions": {
"total": 205.12897325803556,
"count": 222863,
"self": 205.12897325803556
}
}
}
}
},
"workers": {
"total": 2.771480354025016,
"count": 232057,
"self": 0.0,
"children": {
"worker_root": {
"total": 2376.6073853798794,
"count": 232057,
"is_parallel": true,
"self": 1080.2732766918941,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002092592000053628,
"count": 1,
"is_parallel": true,
"self": 0.000313949000201319,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001778642999852309,
"count": 2,
"is_parallel": true,
"self": 0.001778642999852309
}
}
},
"UnityEnvironment.step": {
"total": 0.029567992000011145,
"count": 1,
"is_parallel": true,
"self": 0.0003139840002859273,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019781599985435605,
"count": 1,
"is_parallel": true,
"self": 0.00019781599985435605
},
"communicator.exchange": {
"total": 0.028288260999943304,
"count": 1,
"is_parallel": true,
"self": 0.028288260999943304
},
"steps_from_proto": {
"total": 0.0007679309999275574,
"count": 1,
"is_parallel": true,
"self": 0.000253566999845134,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005143640000824234,
"count": 2,
"is_parallel": true,
"self": 0.0005143640000824234
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1296.3341086879852,
"count": 232056,
"is_parallel": true,
"self": 37.273014168178406,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.27129167005569,
"count": 232056,
"is_parallel": true,
"self": 81.27129167005569
},
"communicator.exchange": {
"total": 1078.4215195059496,
"count": 232056,
"is_parallel": true,
"self": 1078.4215195059496
},
"steps_from_proto": {
"total": 99.36828334380152,
"count": 232056,
"is_parallel": true,
"self": 41.09189496089175,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.27638838290977,
"count": 464112,
"is_parallel": true,
"self": 58.27638838290977
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 501.62608207006133,
"count": 232057,
"self": 6.428262346844576,
"children": {
"process_trajectory": {
"total": 156.50215943121634,
"count": 232057,
"self": 155.99471314121615,
"children": {
"RLTrainer._checkpoint": {
"total": 0.507446290000189,
"count": 4,
"self": 0.507446290000189
}
}
},
"_update_policy": {
"total": 338.6956602920004,
"count": 97,
"self": 283.27085693298204,
"children": {
"TorchPPOOptimizer.update": {
"total": 55.42480335901837,
"count": 2910,
"self": 55.42480335901837
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.509999472356867e-07,
"count": 1,
"self": 9.509999472356867e-07
},
"TrainerController._save_models": {
"total": 0.12476226299986592,
"count": 1,
"self": 0.002226012999926752,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12253624999993917,
"count": 1,
"self": 0.12253624999993917
}
}
}
}
}
}
}