{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.408005714416504,
"min": 1.408005714416504,
"max": 1.4311772584915161,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68303.765625,
"min": 68303.765625,
"max": 77754.40625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 100.07786885245902,
"min": 96.29423076923077,
"max": 396.93650793650795,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48838.0,
"min": 48838.0,
"max": 50266.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999976.0,
"min": 49691.0,
"max": 1999976.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999976.0,
"min": 49691.0,
"max": 1999976.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.397092580795288,
"min": 0.1799723356962204,
"max": 2.4211385250091553,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1169.7811279296875,
"min": 22.49654197692871,
"max": 1230.152099609375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.726319296682467,
"min": 1.8794116578102111,
"max": 3.8541886832703045,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1818.443816781044,
"min": 234.9264572262764,
"max": 1921.0930666327477,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.726319296682467,
"min": 1.8794116578102111,
"max": 3.8541886832703045,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1818.443816781044,
"min": 234.9264572262764,
"max": 1921.0930666327477,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016848336476444577,
"min": 0.013100478733839635,
"max": 0.019888670139092332,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03369667295288915,
"min": 0.0283447386589008,
"max": 0.059666010417276996,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04902337354918321,
"min": 0.021282983912775916,
"max": 0.057648326456546786,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09804674709836642,
"min": 0.04256596782555183,
"max": 0.16607637790342172,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.602323465924987e-06,
"min": 4.602323465924987e-06,
"max": 0.0002953713015429,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.204646931849974e-06,
"min": 9.204646931849974e-06,
"max": 0.00084415576861475,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101534075,
"min": 0.101534075,
"max": 0.1984571,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20306815,
"min": 0.20306815,
"max": 0.5813852500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.655034249999981e-05,
"min": 8.655034249999981e-05,
"max": 0.0049230092900000005,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00017310068499999963,
"min": 0.00017310068499999963,
"max": 0.014071123974999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689502315",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689504891"
},
"total": 2576.136128309,
"count": 1,
"self": 0.4903040730005159,
"children": {
"run_training.setup": {
"total": 0.04381412099996851,
"count": 1,
"self": 0.04381412099996851
},
"TrainerController.start_learning": {
"total": 2575.6020101149998,
"count": 1,
"self": 4.95383159794892,
"children": {
"TrainerController._reset_env": {
"total": 5.113575024000056,
"count": 1,
"self": 5.113575024000056
},
"TrainerController.advance": {
"total": 2565.404021343051,
"count": 231147,
"self": 4.9067443311109855,
"children": {
"env_step": {
"total": 1992.0653724779606,
"count": 231147,
"self": 1683.612728891045,
"children": {
"SubprocessEnvManager._take_step": {
"total": 305.41315053199185,
"count": 231147,
"self": 17.762970167127833,
"children": {
"TorchPolicy.evaluate": {
"total": 287.650180364864,
"count": 222840,
"self": 287.650180364864
}
}
},
"workers": {
"total": 3.0394930549238097,
"count": 231147,
"self": 0.0,
"children": {
"worker_root": {
"total": 2567.465212134966,
"count": 231147,
"is_parallel": true,
"self": 1191.57415336806,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009488539999438217,
"count": 1,
"is_parallel": true,
"self": 0.00026361799984897516,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006852360000948465,
"count": 2,
"is_parallel": true,
"self": 0.0006852360000948465
}
}
},
"UnityEnvironment.step": {
"total": 0.05017973899998651,
"count": 1,
"is_parallel": true,
"self": 0.00034536299995124864,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002623139999968771,
"count": 1,
"is_parallel": true,
"self": 0.0002623139999968771
},
"communicator.exchange": {
"total": 0.04878806199997143,
"count": 1,
"is_parallel": true,
"self": 0.04878806199997143
},
"steps_from_proto": {
"total": 0.0007840000000669534,
"count": 1,
"is_parallel": true,
"self": 0.00022481300004528748,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005591870000216659,
"count": 2,
"is_parallel": true,
"self": 0.0005591870000216659
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1375.8910587669059,
"count": 231146,
"is_parallel": true,
"self": 42.84415243890794,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.38831256704191,
"count": 231146,
"is_parallel": true,
"self": 84.38831256704191
},
"communicator.exchange": {
"total": 1144.842078958984,
"count": 231146,
"is_parallel": true,
"self": 1144.842078958984
},
"steps_from_proto": {
"total": 103.81651480197195,
"count": 231146,
"is_parallel": true,
"self": 36.39690147995873,
"children": {
"_process_rank_one_or_two_observation": {
"total": 67.41961332201322,
"count": 462292,
"is_parallel": true,
"self": 67.41961332201322
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 568.4319045339796,
"count": 231147,
"self": 7.163790316975678,
"children": {
"process_trajectory": {
"total": 139.0526748780061,
"count": 231147,
"self": 137.57514355700653,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4775313209995602,
"count": 10,
"self": 1.4775313209995602
}
}
},
"_update_policy": {
"total": 422.21543933899784,
"count": 96,
"self": 360.0581694780079,
"children": {
"TorchPPOOptimizer.update": {
"total": 62.15726986098991,
"count": 2880,
"self": 62.15726986098991
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.327999598288443e-06,
"count": 1,
"self": 1.327999598288443e-06
},
"TrainerController._save_models": {
"total": 0.13058082199995624,
"count": 1,
"self": 0.0027426669998931175,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12783815500006313,
"count": 1,
"self": 0.12783815500006313
}
}
}
}
}
}
}