{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4031269550323486,
"min": 1.4031269550323486,
"max": 1.4270142316818237,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69724.1875,
"min": 67396.9921875,
"max": 79785.0078125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 74.99088145896657,
"min": 72.48823529411764,
"max": 388.7421875,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49344.0,
"min": 49281.0,
"max": 50329.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999991.0,
"min": 49882.0,
"max": 1999991.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999991.0,
"min": 49882.0,
"max": 1999991.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.429810047149658,
"min": 0.06919645518064499,
"max": 2.534355401992798,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1598.81494140625,
"min": 9.341521263122559,
"max": 1650.199951171875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.741642819258942,
"min": 1.6569878626752783,
"max": 4.088955957658829,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2462.000975072384,
"min": 223.69336146116257,
"max": 2646.7351167798042,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.741642819258942,
"min": 1.6569878626752783,
"max": 4.088955957658829,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2462.000975072384,
"min": 223.69336146116257,
"max": 2646.7351167798042,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017103854105597647,
"min": 0.013916357679651506,
"max": 0.02093486476417941,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051311562316792944,
"min": 0.028354479445260948,
"max": 0.056769841733087856,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06339871436357498,
"min": 0.02224176445355018,
"max": 0.06971474699676036,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.19019614309072494,
"min": 0.04448352890710036,
"max": 0.19044515726466976,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.990548669850002e-06,
"min": 3.990548669850002e-06,
"max": 0.00029534205155265,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1971646009550007e-05,
"min": 1.1971646009550007e-05,
"max": 0.0008439705186765001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10133015000000001,
"min": 0.10133015000000001,
"max": 0.19844735000000008,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30399045,
"min": 0.2077893,
"max": 0.5813235,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.637448500000001e-05,
"min": 7.637448500000001e-05,
"max": 0.004922522765000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022912345500000005,
"min": 0.00022912345500000005,
"max": 0.014068042650000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692597957",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692600493"
},
"total": 2536.3151510609996,
"count": 1,
"self": 0.43771095800002513,
"children": {
"run_training.setup": {
"total": 0.04254171100001258,
"count": 1,
"self": 0.04254171100001258
},
"TrainerController.start_learning": {
"total": 2535.8348983919996,
"count": 1,
"self": 4.593474422063082,
"children": {
"TrainerController._reset_env": {
"total": 4.254801568999994,
"count": 1,
"self": 4.254801568999994
},
"TrainerController.advance": {
"total": 2526.8593329349364,
"count": 233369,
"self": 4.696786816910844,
"children": {
"env_step": {
"total": 1943.9045177690002,
"count": 233369,
"self": 1641.8523357219904,
"children": {
"SubprocessEnvManager._take_step": {
"total": 298.97099072493455,
"count": 233369,
"self": 17.181103891992166,
"children": {
"TorchPolicy.evaluate": {
"total": 281.7898868329424,
"count": 222969,
"self": 281.7898868329424
}
}
},
"workers": {
"total": 3.0811913220753695,
"count": 233369,
"self": 0.0,
"children": {
"worker_root": {
"total": 2528.2312179569667,
"count": 233369,
"is_parallel": true,
"self": 1191.2428575929855,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008549980000225332,
"count": 1,
"is_parallel": true,
"self": 0.00021994900004074225,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000635048999981791,
"count": 2,
"is_parallel": true,
"self": 0.000635048999981791
}
}
},
"UnityEnvironment.step": {
"total": 0.031055167000033634,
"count": 1,
"is_parallel": true,
"self": 0.0003132340000320255,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021759200001270074,
"count": 1,
"is_parallel": true,
"self": 0.00021759200001270074
},
"communicator.exchange": {
"total": 0.02968105799999421,
"count": 1,
"is_parallel": true,
"self": 0.02968105799999421
},
"steps_from_proto": {
"total": 0.0008432829999946989,
"count": 1,
"is_parallel": true,
"self": 0.00022616600000446851,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006171169999902304,
"count": 2,
"is_parallel": true,
"self": 0.0006171169999902304
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1336.9883603639812,
"count": 233368,
"is_parallel": true,
"self": 40.307256812026935,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.57026382297431,
"count": 233368,
"is_parallel": true,
"self": 81.57026382297431
},
"communicator.exchange": {
"total": 1115.4213561209322,
"count": 233368,
"is_parallel": true,
"self": 1115.4213561209322
},
"steps_from_proto": {
"total": 99.68948360804774,
"count": 233368,
"is_parallel": true,
"self": 35.4182236580931,
"children": {
"_process_rank_one_or_two_observation": {
"total": 64.27125994995464,
"count": 466736,
"is_parallel": true,
"self": 64.27125994995464
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 578.2580283490254,
"count": 233369,
"self": 6.9331971059841635,
"children": {
"process_trajectory": {
"total": 151.0393297340416,
"count": 233369,
"self": 149.52457312004213,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5147566139994524,
"count": 10,
"self": 1.5147566139994524
}
}
},
"_update_policy": {
"total": 420.28550150899963,
"count": 97,
"self": 359.19810729100664,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.087394217992994,
"count": 2910,
"self": 61.087394217992994
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0560002010606695e-06,
"count": 1,
"self": 1.0560002010606695e-06
},
"TrainerController._save_models": {
"total": 0.12728840999989188,
"count": 1,
"self": 0.0020512950000011188,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12523711499989076,
"count": 1,
"self": 0.12523711499989076
}
}
}
}
}
}
}