{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.402205228805542,
"min": 1.402205228805542,
"max": 1.426506757736206,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70289.7421875,
"min": 69166.9609375,
"max": 77072.84375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 80.95894909688013,
"min": 80.22764227642277,
"max": 384.9153846153846,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49304.0,
"min": 48866.0,
"max": 50108.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999951.0,
"min": 49717.0,
"max": 1999951.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999951.0,
"min": 49717.0,
"max": 1999951.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.46866512298584,
"min": 0.08215023577213287,
"max": 2.493443250656128,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1503.4169921875,
"min": 10.597380638122559,
"max": 1503.4169921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7829865467763693,
"min": 2.0338714774264846,
"max": 4.066064072489739,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2303.838806986809,
"min": 262.3694205880165,
"max": 2369.8820927739143,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7829865467763693,
"min": 2.0338714774264846,
"max": 4.066064072489739,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2303.838806986809,
"min": 262.3694205880165,
"max": 2369.8820927739143,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.019765743900724272,
"min": 0.013843088384601288,
"max": 0.019765743900724272,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05929723170217282,
"min": 0.027686176769202576,
"max": 0.05929723170217282,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.057087204812301534,
"min": 0.0240795914704601,
"max": 0.060657571504513424,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1712616144369046,
"min": 0.0481591829409202,
"max": 0.1758168764412403,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.619948793383334e-06,
"min": 3.619948793383334e-06,
"max": 0.00029534205155264993,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0859846380150003e-05,
"min": 1.0859846380150003e-05,
"max": 0.000844266018578,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10120661666666665,
"min": 0.10120661666666665,
"max": 0.19844735,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30361984999999997,
"min": 0.20755615000000005,
"max": 0.5814220000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.02101716666667e-05,
"min": 7.02101716666667e-05,
"max": 0.004922522765000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021063051500000012,
"min": 0.00021063051500000012,
"max": 0.014072957800000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688970269",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688972584"
},
"total": 2315.505838022,
"count": 1,
"self": 0.44061305400009587,
"children": {
"run_training.setup": {
"total": 0.04628638399992724,
"count": 1,
"self": 0.04628638399992724
},
"TrainerController.start_learning": {
"total": 2315.018938584,
"count": 1,
"self": 4.2046122029978505,
"children": {
"TrainerController._reset_env": {
"total": 4.187538060000065,
"count": 1,
"self": 4.187538060000065
},
"TrainerController.advance": {
"total": 2306.508130236002,
"count": 232653,
"self": 4.336184311830948,
"children": {
"env_step": {
"total": 1787.482703128982,
"count": 232653,
"self": 1505.48554131493,
"children": {
"SubprocessEnvManager._take_step": {
"total": 279.3201724170326,
"count": 232653,
"self": 16.01784881513845,
"children": {
"TorchPolicy.evaluate": {
"total": 263.30232360189416,
"count": 222983,
"self": 263.30232360189416
}
}
},
"workers": {
"total": 2.6769893970192697,
"count": 232653,
"self": 0.0,
"children": {
"worker_root": {
"total": 2307.464894696004,
"count": 232653,
"is_parallel": true,
"self": 1078.8940949509738,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009150519999820972,
"count": 1,
"is_parallel": true,
"self": 0.00023982499988051131,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006752270001015859,
"count": 2,
"is_parallel": true,
"self": 0.0006752270001015859
}
}
},
"UnityEnvironment.step": {
"total": 0.04712010100001862,
"count": 1,
"is_parallel": true,
"self": 0.0003886160001229655,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023722899993572355,
"count": 1,
"is_parallel": true,
"self": 0.00023722899993572355
},
"communicator.exchange": {
"total": 0.045743552999965686,
"count": 1,
"is_parallel": true,
"self": 0.045743552999965686
},
"steps_from_proto": {
"total": 0.0007507029999942461,
"count": 1,
"is_parallel": true,
"self": 0.00022274100001595798,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005279619999782881,
"count": 2,
"is_parallel": true,
"self": 0.0005279619999782881
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1228.5707997450304,
"count": 232652,
"is_parallel": true,
"self": 38.94564535214408,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.509704132906,
"count": 232652,
"is_parallel": true,
"self": 77.509704132906
},
"communicator.exchange": {
"total": 1017.1591010549233,
"count": 232652,
"is_parallel": true,
"self": 1017.1591010549233
},
"steps_from_proto": {
"total": 94.956349205057,
"count": 232652,
"is_parallel": true,
"self": 33.351332181047724,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.60501702400927,
"count": 465304,
"is_parallel": true,
"self": 61.60501702400927
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 514.6892427951891,
"count": 232653,
"self": 6.590821672150241,
"children": {
"process_trajectory": {
"total": 132.5832292460392,
"count": 232653,
"self": 131.17098568403958,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4122435619996168,
"count": 10,
"self": 1.4122435619996168
}
}
},
"_update_policy": {
"total": 375.51519187699967,
"count": 97,
"self": 316.0227749099971,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.492416967002555,
"count": 2910,
"self": 59.492416967002555
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0399999155197293e-06,
"count": 1,
"self": 1.0399999155197293e-06
},
"TrainerController._save_models": {
"total": 0.11865704499996355,
"count": 1,
"self": 0.0018862309998439741,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11677081400011957,
"count": 1,
"self": 0.11677081400011957
}
}
}
}
}
}
}