ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.408309817314148,
"min": 1.408309817314148,
"max": 1.4313771724700928,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70267.6171875,
"min": 68652.0625,
"max": 77671.3125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 170.36986301369862,
"min": 108.46916299559471,
"max": 403.352,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49748.0,
"min": 49228.0,
"max": 50419.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999960.0,
"min": 49795.0,
"max": 1999960.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999960.0,
"min": 49795.0,
"max": 1999960.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.137913942337036,
"min": 0.1004694402217865,
"max": 2.3842742443084717,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 624.2708740234375,
"min": 12.458210945129395,
"max": 1055.901123046875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.147535459227758,
"min": 1.9728028660099353,
"max": 3.7365421282543854,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 919.0803540945053,
"min": 244.62755538523197,
"max": 1633.7183106541634,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.147535459227758,
"min": 1.9728028660099353,
"max": 3.7365421282543854,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 919.0803540945053,
"min": 244.62755538523197,
"max": 1633.7183106541634,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018059810208311926,
"min": 0.01385816164353552,
"max": 0.020582606475606252,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03611962041662385,
"min": 0.02771632328707104,
"max": 0.05947090209810995,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.03168686029190818,
"min": 0.022551580456395943,
"max": 0.06686549426780806,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.06337372058381636,
"min": 0.04510316091279189,
"max": 0.20059648280342418,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.425323524925012e-06,
"min": 4.425323524925012e-06,
"max": 0.0002953529265490249,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.850647049850023e-06,
"min": 8.850647049850023e-06,
"max": 0.0008441713686095499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101475075,
"min": 0.101475075,
"max": 0.19845097500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20295015,
"min": 0.20295015,
"max": 0.5813904500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.360624250000015e-05,
"min": 8.360624250000015e-05,
"max": 0.0049227036525,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001672124850000003,
"min": 0.0001672124850000003,
"max": 0.014071383454999996,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673874777",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673876994"
},
"total": 2217.3013282869997,
"count": 1,
"self": 0.39013234300000477,
"children": {
"run_training.setup": {
"total": 0.10862126400002126,
"count": 1,
"self": 0.10862126400002126
},
"TrainerController.start_learning": {
"total": 2216.80257468,
"count": 1,
"self": 3.7674800630406935,
"children": {
"TrainerController._reset_env": {
"total": 9.998401427999966,
"count": 1,
"self": 9.998401427999966
},
"TrainerController.advance": {
"total": 2202.9094371459596,
"count": 230064,
"self": 4.2288819780237645,
"children": {
"env_step": {
"total": 1743.96761652006,
"count": 230064,
"self": 1463.6521337211188,
"children": {
"SubprocessEnvManager._take_step": {
"total": 277.68738174493046,
"count": 230064,
"self": 14.190580692939193,
"children": {
"TorchPolicy.evaluate": {
"total": 263.49680105199127,
"count": 223062,
"self": 65.50508261197547,
"children": {
"TorchPolicy.sample_actions": {
"total": 197.9917184400158,
"count": 223062,
"self": 197.9917184400158
}
}
}
}
},
"workers": {
"total": 2.6281010540104717,
"count": 230064,
"self": 0.0,
"children": {
"worker_root": {
"total": 2209.1100403460405,
"count": 230064,
"is_parallel": true,
"self": 1002.7862379129513,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019824009999638292,
"count": 1,
"is_parallel": true,
"self": 0.0003523809999705918,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016300199999932374,
"count": 2,
"is_parallel": true,
"self": 0.0016300199999932374
}
}
},
"UnityEnvironment.step": {
"total": 0.04831391599998369,
"count": 1,
"is_parallel": true,
"self": 0.0002906369999209346,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002033690000189381,
"count": 1,
"is_parallel": true,
"self": 0.0002033690000189381
},
"communicator.exchange": {
"total": 0.04692314900000838,
"count": 1,
"is_parallel": true,
"self": 0.04692314900000838
},
"steps_from_proto": {
"total": 0.0008967610000354398,
"count": 1,
"is_parallel": true,
"self": 0.00024204600003940868,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006547149999960311,
"count": 2,
"is_parallel": true,
"self": 0.0006547149999960311
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1206.3238024330892,
"count": 230063,
"is_parallel": true,
"self": 34.657858344101214,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.50286119799478,
"count": 230063,
"is_parallel": true,
"self": 76.50286119799478
},
"communicator.exchange": {
"total": 1002.0049849219098,
"count": 230063,
"is_parallel": true,
"self": 1002.0049849219098
},
"steps_from_proto": {
"total": 93.15809796908343,
"count": 230063,
"is_parallel": true,
"self": 38.24168791200361,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.916410057079815,
"count": 460126,
"is_parallel": true,
"self": 54.916410057079815
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 454.71293864787606,
"count": 230064,
"self": 6.1714097657586535,
"children": {
"process_trajectory": {
"total": 140.49812303811757,
"count": 230064,
"self": 139.30520981411792,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1929132239996534,
"count": 10,
"self": 1.1929132239996534
}
}
},
"_update_policy": {
"total": 308.04340584399984,
"count": 96,
"self": 254.34924633000355,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.69415951399628,
"count": 2880,
"self": 53.69415951399628
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.559998943586834e-07,
"count": 1,
"self": 9.559998943586834e-07
},
"TrainerController._save_models": {
"total": 0.12725508699986676,
"count": 1,
"self": 0.0019962429996667197,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12525884400020004,
"count": 1,
"self": 0.12525884400020004
}
}
}
}
}
}
}
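
For readers who want to inspect these numbers outside the Hugging Face file viewer, the short Python sketch below loads the JSON with the standard-library json module, prints each training gauge, and walks the nested timer tree to show where wall-clock time was spent. The local filename and the walk helper are assumptions for illustration only; they are not part of the ML-Agents tooling.

```python
# Minimal sketch for inspecting this timers.json offline.
# Assumes the file has been downloaded locally as "timers.json";
# the helper below is illustrative, not an ML-Agents API.
import json

with open("timers.json") as f:
    timers = json.load(f)

# Final / min / max value of each gauge recorded during training.
for name, gauge in timers["gauges"].items():
    print(f"{name}: last={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# Recursively report the total seconds spent in each timer node.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s "
          f"over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)
```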