{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4064207077026367,
"min": 1.4064207077026367,
"max": 1.4279085397720337,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72482.703125,
"min": 67600.03125,
"max": 77833.015625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 76.33539412673879,
"min": 69.73408769448373,
"max": 397.95238095238096,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49389.0,
"min": 48877.0,
"max": 50142.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999984.0,
"min": 49518.0,
"max": 1999984.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999984.0,
"min": 49518.0,
"max": 1999984.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.486198663711548,
"min": 0.08957096934318542,
"max": 2.559314250946045,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1608.570556640625,
"min": 11.196371078491211,
"max": 1730.6673583984375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.830514403270238,
"min": 1.8093375999927521,
"max": 4.0183310794830325,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2478.342818915844,
"min": 226.167199999094,
"max": 2718.638501226902,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.830514403270238,
"min": 1.8093375999927521,
"max": 4.0183310794830325,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2478.342818915844,
"min": 226.167199999094,
"max": 2718.638501226902,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01691506954957731,
"min": 0.013998152362182735,
"max": 0.0199628574870682,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05074520864873193,
"min": 0.02799630472436547,
"max": 0.059396218378484876,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.061821020974053266,
"min": 0.024631244782358407,
"max": 0.0662436667829752,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1854630629221598,
"min": 0.049262489564716815,
"max": 0.1987310003489256,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.849848716750009e-06,
"min": 3.849848716750009e-06,
"max": 0.000295320976559675,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1549546150250026e-05,
"min": 1.1549546150250026e-05,
"max": 0.0008439871686709497,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10128325,
"min": 0.10128325,
"max": 0.198440325,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30384975000000003,
"min": 0.20770080000000002,
"max": 0.5813290500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.403417500000013e-05,
"min": 7.403417500000013e-05,
"max": 0.0049221722175,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002221025250000004,
"min": 0.0002221025250000004,
"max": 0.014068319595000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671759686",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671761924"
},
"total": 2238.532246399,
"count": 1,
"self": 0.4453069040000628,
"children": {
"run_training.setup": {
"total": 0.10812149299999874,
"count": 1,
"self": 0.10812149299999874
},
"TrainerController.start_learning": {
"total": 2237.978818002,
"count": 1,
"self": 3.8273492419803006,
"children": {
"TrainerController._reset_env": {
"total": 8.177701225999954,
"count": 1,
"self": 8.177701225999954
},
"TrainerController.advance": {
"total": 2225.85036984502,
"count": 233741,
"self": 3.865773821989933,
"children": {
"env_step": {
"total": 1745.464731547052,
"count": 233741,
"self": 1468.134291971996,
"children": {
"SubprocessEnvManager._take_step": {
"total": 274.7670498400539,
"count": 233741,
"self": 14.069244050044063,
"children": {
"TorchPolicy.evaluate": {
"total": 260.69780579000985,
"count": 223030,
"self": 65.18637481803614,
"children": {
"TorchPolicy.sample_actions": {
"total": 195.5114309719737,
"count": 223030,
"self": 195.5114309719737
}
}
}
}
},
"workers": {
"total": 2.5633897350022607,
"count": 233741,
"self": 0.0,
"children": {
"worker_root": {
"total": 2230.138199403063,
"count": 233741,
"is_parallel": true,
"self": 1018.4459179230948,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002022604999979194,
"count": 1,
"is_parallel": true,
"self": 0.0003561390000186293,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016664659999605647,
"count": 2,
"is_parallel": true,
"self": 0.0016664659999605647
}
}
},
"UnityEnvironment.step": {
"total": 0.034828823999987435,
"count": 1,
"is_parallel": true,
"self": 0.00030125700004646205,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021275400001741218,
"count": 1,
"is_parallel": true,
"self": 0.00021275400001741218
},
"communicator.exchange": {
"total": 0.0334279940000215,
"count": 1,
"is_parallel": true,
"self": 0.0334279940000215
},
"steps_from_proto": {
"total": 0.0008868189999020615,
"count": 1,
"is_parallel": true,
"self": 0.0002551589998347481,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006316600000673134,
"count": 2,
"is_parallel": true,
"self": 0.0006316600000673134
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1211.6922814799682,
"count": 233740,
"is_parallel": true,
"self": 35.30267312492197,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.50228201514574,
"count": 233740,
"is_parallel": true,
"self": 76.50228201514574
},
"communicator.exchange": {
"total": 1006.4216405950289,
"count": 233740,
"is_parallel": true,
"self": 1006.4216405950289
},
"steps_from_proto": {
"total": 93.46568574487162,
"count": 233740,
"is_parallel": true,
"self": 38.589315607838785,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.876370137032836,
"count": 467480,
"is_parallel": true,
"self": 54.876370137032836
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 476.5198644759779,
"count": 233741,
"self": 5.894385987050782,
"children": {
"process_trajectory": {
"total": 150.65354340692556,
"count": 233741,
"self": 149.46065206692606,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1928913399995054,
"count": 10,
"self": 1.1928913399995054
}
}
},
"_update_policy": {
"total": 319.97193508200155,
"count": 97,
"self": 266.52652079700465,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.4454142849969,
"count": 2910,
"self": 53.4454142849969
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0170001587539446e-06,
"count": 1,
"self": 1.0170001587539446e-06
},
"TrainerController._save_models": {
"total": 0.12339667199967153,
"count": 1,
"self": 0.0019790079995800625,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12141766400009146,
"count": 1,
"self": 0.12141766400009146
}
}
}
}
}
}
}