{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4244053363800049,
"min": 1.4195632934570312,
"max": 1.4244053363800049,
"count": 4
},
"Huggy.Policy.Entropy.sum": {
"value": 68976.828125,
"min": 68641.21875,
"max": 77797.75,
"count": 4
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 254.55102040816325,
"min": 254.55102040816325,
"max": 411.11475409836066,
"count": 4
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49892.0,
"min": 49752.0,
"max": 50156.0,
"count": 4
},
"Huggy.Step.mean": {
"value": 199741.0,
"min": 49996.0,
"max": 199741.0,
"count": 4
},
"Huggy.Step.sum": {
"value": 199741.0,
"min": 49996.0,
"max": 199741.0,
"count": 4
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8689877390861511,
"min": 0.12381373345851898,
"max": 0.8689877390861511,
"count": 4
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 170.32159423828125,
"min": 14.981461524963379,
"max": 170.32159423828125,
"count": 4
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 2.717016255673097,
"min": 2.04694689773331,
"max": 3.0985174133519457,
"count": 4
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 532.535186111927,
"min": 247.68057462573051,
"max": 532.535186111927,
"count": 4
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 2.717016255673097,
"min": 2.04694689773331,
"max": 3.0985174133519457,
"count": 4
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 532.535186111927,
"min": 247.68057462573051,
"max": 532.535186111927,
"count": 4
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017821442436494785,
"min": 0.016230978905029284,
"max": 0.017821442436494785,
"count": 4
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03564288487298957,
"min": 0.03246195781005857,
"max": 0.05238262479057691,
"count": 4
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.02292571033661564,
"min": 0.021944397625823814,
"max": 0.026212223805487156,
"count": 4
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.04585142067323128,
"min": 0.04388879525164763,
"max": 0.07020988961060842,
"count": 4
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.500183833275001e-05,
"min": 3.500183833275001e-05,
"max": 0.00025331701556099994,
"count": 4
},
"Huggy.Policy.LearningRate.sum": {
"value": 7.000367666550001e-05,
"min": 7.000367666550001e-05,
"max": 0.0005066340311219999,
"count": 4
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.11166725,
"min": 0.11166725,
"max": 0.18443899999999996,
"count": 4
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.2233345,
"min": 0.2233345,
"max": 0.41308350000000005,
"count": 4
},
"Huggy.Policy.Beta.mean": {
"value": 0.0005921957750000002,
"min": 0.0005921957750000002,
"max": 0.0042235061000000015,
"count": 4
},
"Huggy.Policy.Beta.sum": {
"value": 0.0011843915500000003,
"min": 0.0011843915500000003,
"max": 0.008447012200000003,
"count": 4
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687530985",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687531256"
},
"total": 271.48782126799995,
"count": 1,
"self": 0.43123991599998135,
"children": {
"run_training.setup": {
"total": 0.040694286999951146,
"count": 1,
"self": 0.040694286999951146
},
"TrainerController.start_learning": {
"total": 271.015887065,
"count": 1,
"self": 0.5082530570028894,
"children": {
"TrainerController._reset_env": {
"total": 3.982811718999983,
"count": 1,
"self": 3.982811718999983
},
"TrainerController.advance": {
"total": 266.3849848509973,
"count": 22816,
"self": 0.5288923970009591,
"children": {
"env_step": {
"total": 215.09847486799117,
"count": 22816,
"self": 179.90693031400917,
"children": {
"SubprocessEnvManager._take_step": {
"total": 34.87338318498348,
"count": 22816,
"self": 2.03944584297642,
"children": {
"TorchPolicy.evaluate": {
"total": 32.83393734200706,
"count": 22533,
"self": 32.83393734200706
}
}
},
"workers": {
"total": 0.318161368998517,
"count": 22816,
"self": 0.0,
"children": {
"worker_root": {
"total": 269.87286573598635,
"count": 22816,
"is_parallel": true,
"self": 123.2757634559988,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008100099998955557,
"count": 1,
"is_parallel": true,
"self": 0.00025565799990090454,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005543519999946511,
"count": 2,
"is_parallel": true,
"self": 0.0005543519999946511
}
}
},
"UnityEnvironment.step": {
"total": 0.0337738599999966,
"count": 1,
"is_parallel": true,
"self": 0.0003362620000189054,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00024197000004733127,
"count": 1,
"is_parallel": true,
"self": 0.00024197000004733127
},
"communicator.exchange": {
"total": 0.032390163999934884,
"count": 1,
"is_parallel": true,
"self": 0.032390163999934884
},
"steps_from_proto": {
"total": 0.0008054639999954816,
"count": 1,
"is_parallel": true,
"self": 0.000254898999969555,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005505650000259266,
"count": 2,
"is_parallel": true,
"self": 0.0005505650000259266
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 146.59710227998755,
"count": 22815,
"is_parallel": true,
"self": 4.272444693979082,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.327795255990395,
"count": 22815,
"is_parallel": true,
"self": 9.327795255990395
},
"communicator.exchange": {
"total": 122.50915712401797,
"count": 22815,
"is_parallel": true,
"self": 122.50915712401797
},
"steps_from_proto": {
"total": 10.4877052060001,
"count": 22815,
"is_parallel": true,
"self": 3.9249860689864136,
"children": {
"_process_rank_one_or_two_observation": {
"total": 6.562719137013687,
"count": 45630,
"is_parallel": true,
"self": 6.562719137013687
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 50.75761758600515,
"count": 22816,
"self": 0.8203720649969455,
"children": {
"process_trajectory": {
"total": 11.651082952008323,
"count": 22816,
"self": 11.515765910008327,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13531704199999695,
"count": 1,
"self": 0.13531704199999695
}
}
},
"_update_policy": {
"total": 38.286162568999885,
"count": 9,
"self": 32.75401158199952,
"children": {
"TorchPPOOptimizer.update": {
"total": 5.532150987000364,
"count": 270,
"self": 5.532150987000364
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6259998574241763e-06,
"count": 1,
"self": 1.6259998574241763e-06
},
"TrainerController._save_models": {
"total": 0.13983581200000117,
"count": 1,
"self": 0.002115994999940085,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13771981700006108,
"count": 1,
"self": 0.13771981700006108
}
}
}
}
}
}
}