{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4034096002578735,
"min": 1.403408169746399,
"max": 1.427811861038208,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69186.6875,
"min": 68537.65625,
"max": 76636.921875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 87.2664298401421,
"min": 85.11379310344827,
"max": 394.2992125984252,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49131.0,
"min": 48864.0,
"max": 50076.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999969.0,
"min": 49665.0,
"max": 1999969.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999969.0,
"min": 49665.0,
"max": 1999969.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4432075023651123,
"min": 0.015479848720133305,
"max": 2.4547901153564453,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1375.52587890625,
"min": 1.9504609107971191,
"max": 1399.02783203125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7206266097870007,
"min": 1.7433741836557313,
"max": 3.959450693971697,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2094.7127813100815,
"min": 219.66514714062214,
"max": 2217.533618092537,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7206266097870007,
"min": 1.7433741836557313,
"max": 3.959450693971697,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2094.7127813100815,
"min": 219.66514714062214,
"max": 2217.533618092537,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015384134832452726,
"min": 0.01405468558038289,
"max": 0.01903396976898269,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.046152404497358177,
"min": 0.02810937116076578,
"max": 0.056163126659036305,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05613442038496336,
"min": 0.02213188884779811,
"max": 0.05762161711851756,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16840326115489007,
"min": 0.04426377769559622,
"max": 0.16879054456949233,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5160988279999967e-06,
"min": 3.5160988279999967e-06,
"max": 0.000295323526558825,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.054829648399999e-05,
"min": 1.054829648399999e-05,
"max": 0.0008441280186240001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101172,
"min": 0.101172,
"max": 0.19844117500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.303516,
"min": 0.20747610000000002,
"max": 0.5813760000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.848279999999995e-05,
"min": 6.848279999999995e-05,
"max": 0.004922214632499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020544839999999984,
"min": 0.00020544839999999984,
"max": 0.0140706624,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689176202",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689178675"
},
"total": 2472.736635842,
"count": 1,
"self": 0.48788476599975183,
"children": {
"run_training.setup": {
"total": 0.07172868399993604,
"count": 1,
"self": 0.07172868399993604
},
"TrainerController.start_learning": {
"total": 2472.1770223920003,
"count": 1,
"self": 4.341361055071957,
"children": {
"TrainerController._reset_env": {
"total": 4.725717091999968,
"count": 1,
"self": 4.725717091999968
},
"TrainerController.advance": {
"total": 2462.9822428969283,
"count": 232100,
"self": 4.632977563888289,
"children": {
"env_step": {
"total": 1911.592300730043,
"count": 232100,
"self": 1611.6754269610105,
"children": {
"SubprocessEnvManager._take_step": {
"total": 297.0394005650311,
"count": 232100,
"self": 16.999890456079015,
"children": {
"TorchPolicy.evaluate": {
"total": 280.0395101089521,
"count": 222884,
"self": 280.0395101089521
}
}
},
"workers": {
"total": 2.877473204001376,
"count": 232100,
"self": 0.0,
"children": {
"worker_root": {
"total": 2464.1897675699365,
"count": 232100,
"is_parallel": true,
"self": 1147.4014937069524,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001114927000003263,
"count": 1,
"is_parallel": true,
"self": 0.00032362100000682403,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007913059999964389,
"count": 2,
"is_parallel": true,
"self": 0.0007913059999964389
}
}
},
"UnityEnvironment.step": {
"total": 0.02869658999998137,
"count": 1,
"is_parallel": true,
"self": 0.000349580999909449,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002109690000224873,
"count": 1,
"is_parallel": true,
"self": 0.0002109690000224873
},
"communicator.exchange": {
"total": 0.027413730000034775,
"count": 1,
"is_parallel": true,
"self": 0.027413730000034775
},
"steps_from_proto": {
"total": 0.0007223100000146587,
"count": 1,
"is_parallel": true,
"self": 0.00020037000001593697,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005219399999987218,
"count": 2,
"is_parallel": true,
"self": 0.0005219399999987218
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1316.7882738629842,
"count": 232099,
"is_parallel": true,
"self": 40.42581853203819,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.35278452298292,
"count": 232099,
"is_parallel": true,
"self": 81.35278452298292
},
"communicator.exchange": {
"total": 1096.2591609710048,
"count": 232099,
"is_parallel": true,
"self": 1096.2591609710048
},
"steps_from_proto": {
"total": 98.75050983695826,
"count": 232099,
"is_parallel": true,
"self": 34.33768998688561,
"children": {
"_process_rank_one_or_two_observation": {
"total": 64.41281985007265,
"count": 464198,
"is_parallel": true,
"self": 64.41281985007265
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 546.7569646029968,
"count": 232100,
"self": 6.722231360099499,
"children": {
"process_trajectory": {
"total": 136.89457022489887,
"count": 232100,
"self": 135.56731270389866,
"children": {
"RLTrainer._checkpoint": {
"total": 1.327257521000206,
"count": 10,
"self": 1.327257521000206
}
}
},
"_update_policy": {
"total": 403.1401630179985,
"count": 97,
"self": 341.3205356479957,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.81962737000276,
"count": 2910,
"self": 61.81962737000276
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0849998943740502e-06,
"count": 1,
"self": 1.0849998943740502e-06
},
"TrainerController._save_models": {
"total": 0.12770026299995152,
"count": 1,
"self": 0.0021964610000395624,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12550380199991196,
"count": 1,
"self": 0.12550380199991196
}
}
}
}
}
}
}