carlosmirandad
Bonus Unit 1: Huggy first commit
e479011
{
    "name": "root",
    "gauges": {
        "Huggy.Policy.Entropy.mean": {
            "value": 1.399851679801941,
            "min": 1.399851679801941,
            "max": 1.4249622821807861,
            "count": 40
        },
        "Huggy.Policy.Entropy.sum": {
            "value": 70453.1328125,
            "min": 68537.546875,
            "max": 77497.0859375,
            "count": 40
        },
        "Huggy.Environment.EpisodeLength.mean": {
            "value": 75.80952380952381,
            "min": 74.65204236006052,
            "max": 379.93939393939394,
            "count": 40
        },
        "Huggy.Environment.EpisodeLength.sum": {
            "value": 49352.0,
            "min": 49253.0,
            "max": 50186.0,
            "count": 40
        },
        "Huggy.Step.mean": {
            "value": 1999908.0,
            "min": 49714.0,
            "max": 1999908.0,
            "count": 40
        },
        "Huggy.Step.sum": {
            "value": 1999908.0,
            "min": 49714.0,
            "max": 1999908.0,
            "count": 40
        },
        "Huggy.Policy.ExtrinsicValueEstimate.mean": {
            "value": 2.4579620361328125,
            "min": 0.15120956301689148,
            "max": 2.469007968902588,
            "count": 40
        },
        "Huggy.Policy.ExtrinsicValueEstimate.sum": {
            "value": 1600.13330078125,
            "min": 19.808452606201172,
            "max": 1600.44482421875,
            "count": 40
        },
        "Huggy.Environment.CumulativeReward.mean": {
            "value": 3.8819351512166214,
            "min": 1.7905152867768557,
            "max": 3.961153031340114,
            "count": 40
        },
        "Huggy.Environment.CumulativeReward.sum": {
            "value": 2527.1397834420204,
            "min": 234.5575025677681,
            "max": 2527.1397834420204,
            "count": 40
        },
        "Huggy.Policy.ExtrinsicReward.mean": {
            "value": 3.8819351512166214,
            "min": 1.7905152867768557,
            "max": 3.961153031340114,
            "count": 40
        },
        "Huggy.Policy.ExtrinsicReward.sum": {
            "value": 2527.1397834420204,
            "min": 234.5575025677681,
            "max": 2527.1397834420204,
            "count": 40
        },
        "Huggy.Losses.PolicyLoss.mean": {
            "value": 0.018766548916432337,
            "min": 0.013347511469489997,
            "max": 0.020791093296793406,
            "count": 40
        },
        "Huggy.Losses.PolicyLoss.sum": {
            "value": 0.05629964674929701,
            "min": 0.026847026636824013,
            "max": 0.059764446911867705,
            "count": 40
        },
        "Huggy.Losses.ValueLoss.mean": {
            "value": 0.058798814440766976,
            "min": 0.02208628263324499,
            "max": 0.06591221100340286,
            "count": 40
        },
        "Huggy.Losses.ValueLoss.sum": {
            "value": 0.17639644332230092,
            "min": 0.04417256526648998,
            "max": 0.17660248776276907,
            "count": 40
        },
        "Huggy.Policy.LearningRate.mean": {
            "value": 3.5346988218e-06,
            "min": 3.5346988218e-06,
            "max": 0.000295299676566775,
            "count": 40
        },
        "Huggy.Policy.LearningRate.sum": {
            "value": 1.06040964654e-05,
            "min": 1.06040964654e-05,
            "max": 0.0008440900686366497,
            "count": 40
        },
        "Huggy.Policy.Epsilon.mean": {
            "value": 0.10117820000000004,
            "min": 0.10117820000000004,
            "max": 0.19843322499999994,
            "count": 40
        },
        "Huggy.Policy.Epsilon.sum": {
            "value": 0.3035346000000001,
            "min": 0.20751185,
            "max": 0.5813633500000001,
            "count": 40
        },
        "Huggy.Policy.Beta.mean": {
            "value": 6.879217999999999e-05,
            "min": 6.879217999999999e-05,
            "max": 0.0049218179275,
            "count": 40
        },
        "Huggy.Policy.Beta.sum": {
            "value": 0.00020637653999999996,
            "min": 0.00020637653999999996,
            "max": 0.014070031165000003,
            "count": 40
        },
        "Huggy.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 40
        },
        "Huggy.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 40
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1672112686",
        "python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
        "mlagents_version": "0.29.0.dev0",
        "mlagents_envs_version": "0.29.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "1.8.1+cu102",
        "numpy_version": "1.21.6",
        "end_time_seconds": "1672114946"
    },
    "total": 2259.88853775,
    "count": 1,
    "self": 0.39289711900028124,
    "children": {
        "run_training.setup": {
            "total": 0.10550258700004633,
            "count": 1,
            "self": 0.10550258700004633
        },
        "TrainerController.start_learning": {
            "total": 2259.3901380439997,
            "count": 1,
            "self": 3.8411372311079504,
            "children": {
                "TrainerController._reset_env": {
                    "total": 9.301544209999975,
                    "count": 1,
                    "self": 9.301544209999975
                },
                "TrainerController.advance": {
                    "total": 2246.125502253892,
                    "count": 232816,
                    "self": 4.2901587373007715,
                    "children": {
                        "env_step": {
                            "total": 1756.7591993627307,
                            "count": 232816,
                            "self": 1475.54039198473,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 278.606694678979,
                                    "count": 232816,
                                    "self": 14.825483851002218,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 263.7812108279768,
                                            "count": 222942,
                                            "self": 65.91064549891212,
                                            "children": {
                                                "TorchPolicy.sample_actions": {
                                                    "total": 197.87056532906468,
                                                    "count": 222942,
                                                    "self": 197.87056532906468
                                                }
                                            }
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 2.6121126990217363,
                                    "count": 232816,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 2251.3562358099684,
                                            "count": 232816,
                                            "is_parallel": true,
                                            "self": 1036.28443176408,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.001966779999975188,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.00032394100003330095,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.0016428389999418869,
                                                                    "count": 2,
                                                                    "is_parallel": true,
                                                                    "self": 0.0016428389999418869
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.02712508100012201,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0002905060000557569,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.00017539700002089376,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.00017539700002089376
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.02586990000008882,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.02586990000008882
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0007892779999565391,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0002637449997564545,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0005255330002000846,
                                                                            "count": 2,
                                                                            "is_parallel": true,
                                                                            "self": 0.0005255330002000846
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 1215.0718040458885,
                                                    "count": 232815,
                                                    "is_parallel": true,
                                                    "self": 34.99286933759117,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 75.94322001117848,
                                                            "count": 232815,
                                                            "is_parallel": true,
                                                            "self": 75.94322001117848
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 1010.1242134161769,
                                                            "count": 232815,
                                                            "is_parallel": true,
                                                            "self": 1010.1242134161769
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 94.01150128094196,
                                                            "count": 232815,
                                                            "is_parallel": true,
                                                            "self": 38.95449838682043,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 55.057002894121524,
                                                                    "count": 465630,
                                                                    "is_parallel": true,
                                                                    "self": 55.057002894121524
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        },
                        "trainer_advance": {
                            "total": 485.0761441538607,
                            "count": 232816,
                            "self": 5.99370876685316,
                            "children": {
                                "process_trajectory": {
                                    "total": 151.23425434100773,
                                    "count": 232816,
                                    "self": 150.04703760400844,
                                    "children": {
                                        "RLTrainer._checkpoint": {
                                            "total": 1.1872167369992894,
                                            "count": 10,
                                            "self": 1.1872167369992894
                                        }
                                    }
                                },
                                "_update_policy": {
                                    "total": 327.8481810459998,
                                    "count": 97,
                                    "self": 273.63911365500417,
                                    "children": {
                                        "TorchPPOOptimizer.update": {
                                            "total": 54.20906739099564,
                                            "count": 2910,
                                            "self": 54.20906739099564
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 8.439997145615052e-07,
                    "count": 1,
                    "self": 8.439997145615052e-07
                },
                "TrainerController._save_models": {
                    "total": 0.12195350499996493,
                    "count": 1,
                    "self": 0.0019346499998391664,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.12001885500012577,
                            "count": 1,
                            "self": 0.12001885500012577
                        }
                    }
                }
            }
        }
    }
}
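
For reference, a minimal sketch of how a dump like the one above can be inspected, assuming it has been saved locally; the path run_logs/timers.json is a placeholder rather than something taken from this commit, and only Python's standard json module is used:

import json

# Load the ML-Agents gauge/timer dump shown above.
# NOTE: the path is an assumed placeholder; point it at the actual file in the repo.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the most recent value plus a running min/max and an update count.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# The profiling data is a tree: every node has total/count/self and optional children.
print("total training wall-clock seconds:", timers["total"])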