{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4093056917190552,
"min": 1.4093056917190552,
"max": 1.4312716722488403,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69709.8984375,
"min": 69506.15625,
"max": 76491.734375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 93.49338374291115,
"min": 86.17073170731707,
"max": 424.2118644067797,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49458.0,
"min": 48837.0,
"max": 50187.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999985.0,
"min": 49449.0,
"max": 1999985.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999985.0,
"min": 49449.0,
"max": 1999985.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3720998764038086,
"min": 0.1257525086402893,
"max": 2.4413962364196777,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1254.8408203125,
"min": 14.713043212890625,
"max": 1349.5098876953125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.657958654239633,
"min": 1.7533509644687686,
"max": 4.03097974007227,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1935.0601280927658,
"min": 205.14206284284592,
"max": 2204.759352862835,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.657958654239633,
"min": 1.7533509644687686,
"max": 4.03097974007227,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1935.0601280927658,
"min": 205.14206284284592,
"max": 2204.759352862835,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01593951335226949,
"min": 0.012199712907507394,
"max": 0.02117176560811155,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04781854005680847,
"min": 0.024399425815014787,
"max": 0.05288294092363988,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04836036658121481,
"min": 0.02286210260871384,
"max": 0.06512601673603058,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14508109974364441,
"min": 0.046170519292354585,
"max": 0.17829087190330029,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.547998817366671e-06,
"min": 3.547998817366671e-06,
"max": 0.00029529855156714996,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0643996452100013e-05,
"min": 1.0643996452100013e-05,
"max": 0.0008441752686082499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118263333333333,
"min": 0.10118263333333333,
"max": 0.19843285000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035479,
"min": 0.20749725000000008,
"max": 0.5813917500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.90134033333334e-05,
"min": 6.90134033333334e-05,
"max": 0.004921799215,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020704021000000021,
"min": 0.00020704021000000021,
"max": 0.014071448325000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678187263",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678192314"
},
"total": 5051.8787399660005,
"count": 1,
"self": 0.7037851770000998,
"children": {
"run_training.setup": {
"total": 0.23463495300001114,
"count": 1,
"self": 0.23463495300001114
},
"TrainerController.start_learning": {
"total": 5050.9403198360005,
"count": 1,
"self": 8.696856835083963,
"children": {
"TrainerController._reset_env": {
"total": 9.557018606000042,
"count": 1,
"self": 9.557018606000042
},
"TrainerController.advance": {
"total": 5032.553397236918,
"count": 231867,
"self": 9.67335531581557,
"children": {
"env_step": {
"total": 3281.0639944201357,
"count": 231867,
"self": 2799.7257796933363,
"children": {
"SubprocessEnvManager._take_step": {
"total": 475.1635666408515,
"count": 231867,
"self": 32.69880358596271,
"children": {
"TorchPolicy.evaluate": {
"total": 442.4647630548888,
"count": 222933,
"self": 66.32888769610668,
"children": {
"TorchPolicy.sample_actions": {
"total": 376.13587535878213,
"count": 222933,
"self": 376.13587535878213
}
}
}
}
},
"workers": {
"total": 6.174648085947865,
"count": 231867,
"self": 0.0,
"children": {
"worker_root": {
"total": 5032.406899796111,
"count": 231867,
"is_parallel": true,
"self": 2794.0508842901304,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0012185479999970994,
"count": 1,
"is_parallel": true,
"self": 0.0004239699999857294,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00079457800001137,
"count": 2,
"is_parallel": true,
"self": 0.00079457800001137
}
}
},
"UnityEnvironment.step": {
"total": 0.03665204200001426,
"count": 1,
"is_parallel": true,
"self": 0.00045391999998400934,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00027404200000091805,
"count": 1,
"is_parallel": true,
"self": 0.00027404200000091805
},
"communicator.exchange": {
"total": 0.035012374000018553,
"count": 1,
"is_parallel": true,
"self": 0.035012374000018553
},
"steps_from_proto": {
"total": 0.0009117060000107813,
"count": 1,
"is_parallel": true,
"self": 0.0002854999999613028,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006262060000494785,
"count": 2,
"is_parallel": true,
"self": 0.0006262060000494785
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2238.356015505981,
"count": 231866,
"is_parallel": true,
"self": 69.80054739529396,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 100.20562726984559,
"count": 231866,
"is_parallel": true,
"self": 100.20562726984559
},
"communicator.exchange": {
"total": 1909.2667613380154,
"count": 231866,
"is_parallel": true,
"self": 1909.2667613380154
},
"steps_from_proto": {
"total": 159.0830795028259,
"count": 231866,
"is_parallel": true,
"self": 58.586043584181596,
"children": {
"_process_rank_one_or_two_observation": {
"total": 100.49703591864431,
"count": 463732,
"is_parallel": true,
"self": 100.49703591864431
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1741.816047500966,
"count": 231867,
"self": 14.544137809874883,
"children": {
"process_trajectory": {
"total": 280.5636381170903,
"count": 231867,
"self": 279.0379551520909,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5256829649994188,
"count": 10,
"self": 1.5256829649994188
}
}
},
"_update_policy": {
"total": 1446.708271574001,
"count": 97,
"self": 379.618941483998,
"children": {
"TorchPPOOptimizer.update": {
"total": 1067.089330090003,
"count": 2910,
"self": 1067.089330090003
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0199992175330408e-06,
"count": 1,
"self": 1.0199992175330408e-06
},
"TrainerController._save_models": {
"total": 0.13304613799937215,
"count": 1,
"self": 0.0033979989993895288,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12964813899998262,
"count": 1,
"self": 0.12964813899998262
}
}
}
}
}
}
}
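
The JSON above is the run_logs/timers.json that ML-Agents wrote for this Huggy PPO run (see the mlagents-learn command line in "metadata"): "gauges" holds per-metric value/min/max/count summaries, and the nested "total"/"count"/"self"/"children" entries form the hierarchical wall-clock timer tree. The snippet below is a minimal, unofficial Python sketch of how such a file could be inspected offline; it only assumes the structure visible above, and the relative path "run_logs/timers.json" is the location used for this run (adjust it for your own setup).

import json

def print_timer_tree(node, name="root", depth=0):
    # Each timer node carries total seconds, a call count, and nested children.
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.2f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        print_timer_tree(child, child_name, depth + 1)

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Gauges summarize training metrics (e.g. cumulative reward, losses) over the run.
for gauge_name, gauge in timers["gauges"].items():
    print(f"{gauge_name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# The timer tree shows where wall-clock time went (env stepping, policy updates, etc.).
print_timer_tree(timers)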