{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.7313357591629028,
"min": 1.7127974033355713,
"max": 2.8897507190704346,
"count": 33
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5789.5869140625,
"min": 3986.89013671875,
"max": 9949.412109375,
"count": 33
},
"SnowballTarget.Step.mean": {
"value": 98992.0,
"min": 2968.0,
"max": 98992.0,
"count": 33
},
"SnowballTarget.Step.sum": {
"value": 98992.0,
"min": 2968.0,
"max": 98992.0,
"count": 33
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 8.963665008544922,
"min": 0.10108505934476852,
"max": 8.963665008544922,
"count": 33
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 457.14691162109375,
"min": 4.144487380981445,
"max": 457.14691162109375,
"count": 33
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 33
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 4378.0,
"min": 2189.0,
"max": 4378.0,
"count": 33
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06821459705037448,
"min": 0.056161290620437676,
"max": 0.08300557076090984,
"count": 33
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.13642919410074897,
"min": 0.056161290620437676,
"max": 0.16539050954138385,
"count": 33
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.24401900318323397,
"min": 0.09071497918636191,
"max": 0.3442269283182481,
"count": 33
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.48803800636646794,
"min": 0.09071497918636191,
"max": 0.6462239623069763,
"count": 33
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 6.564097812000004e-06,
"min": 6.564097812000004e-06,
"max": 0.000293664002112,
"count": 33
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.3128195624000008e-05,
"min": 1.3128195624000008e-05,
"max": 0.000554328015224,
"count": 33
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10218800000000001,
"min": 0.10218800000000001,
"max": 0.19788800000000004,
"count": 33
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.20437600000000003,
"min": 0.10548800000000003,
"max": 0.38477600000000006,
"count": 33
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00011918120000000008,
"min": 0.00011918120000000008,
"max": 0.0048946112,
"count": 33
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00023836240000000016,
"min": 0.00023836240000000016,
"max": 0.0092403224,
"count": 33
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 18.714285714285715,
"min": 2.727272727272727,
"max": 19.09090909090909,
"count": 33
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 393.0,
"min": 30.0,
"max": 406.0,
"count": 33
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 18.714285714285715,
"min": 2.727272727272727,
"max": 19.09090909090909,
"count": 33
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 393.0,
"min": 30.0,
"max": 406.0,
"count": 33
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694866240",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1694866533"
},
"total": 292.990521747,
"count": 1,
"self": 0.5265611539998645,
"children": {
"run_training.setup": {
"total": 0.05236504100003003,
"count": 1,
"self": 0.05236504100003003
},
"TrainerController.start_learning": {
"total": 292.4115955520001,
"count": 1,
"self": 0.44906356599926767,
"children": {
"TrainerController._reset_env": {
"total": 1.0040535400000863,
"count": 1,
"self": 1.0040535400000863
},
"TrainerController.advance": {
"total": 290.8054328090011,
"count": 9117,
"self": 0.2306598060079068,
"children": {
"env_step": {
"total": 290.5747730029932,
"count": 9117,
"self": 232.47457808300373,
"children": {
"SubprocessEnvManager._take_step": {
"total": 57.89622661499152,
"count": 9117,
"self": 1.3152518990029876,
"children": {
"TorchPolicy.evaluate": {
"total": 56.58097471598853,
"count": 9117,
"self": 56.58097471598853
}
}
},
"workers": {
"total": 0.20396830499794305,
"count": 9117,
"self": 0.0,
"children": {
"worker_root": {
"total": 291.25662964599746,
"count": 9117,
"is_parallel": true,
"self": 123.46436249100248,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002140725000117527,
"count": 1,
"is_parallel": true,
"self": 0.0006109940006808756,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015297309994366515,
"count": 10,
"is_parallel": true,
"self": 0.0015297309994366515
}
}
},
"UnityEnvironment.step": {
"total": 0.05819984600020689,
"count": 1,
"is_parallel": true,
"self": 0.0005269930002214096,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045711899997513683,
"count": 1,
"is_parallel": true,
"self": 0.00045711899997513683
},
"communicator.exchange": {
"total": 0.05370774099992559,
"count": 1,
"is_parallel": true,
"self": 0.05370774099992559
},
"steps_from_proto": {
"total": 0.0035079930000847526,
"count": 1,
"is_parallel": true,
"self": 0.0015020980004010198,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020058949996837327,
"count": 10,
"is_parallel": true,
"self": 0.0020058949996837327
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 167.792267154995,
"count": 9116,
"is_parallel": true,
"self": 6.9609572139977445,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.7738556229912774,
"count": 9116,
"is_parallel": true,
"self": 3.7738556229912774
},
"communicator.exchange": {
"total": 132.94538953800497,
"count": 9116,
"is_parallel": true,
"self": 132.94538953800497
},
"steps_from_proto": {
"total": 24.112064780000992,
"count": 9116,
"is_parallel": true,
"self": 4.669569226967724,
"children": {
"_process_rank_one_or_two_observation": {
"total": 19.442495553033268,
"count": 91160,
"is_parallel": true,
"self": 19.442495553033268
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.913099974146462e-05,
"count": 1,
"self": 4.913099974146462e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 288.7915713470172,
"count": 225280,
"is_parallel": true,
"self": 5.8745017050594015,
"children": {
"process_trajectory": {
"total": 129.6866089769578,
"count": 225280,
"is_parallel": true,
"self": 129.12433366295772,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5622753140000896,
"count": 2,
"is_parallel": true,
"self": 0.5622753140000896
}
}
},
"_update_policy": {
"total": 153.230460665,
"count": 45,
"is_parallel": true,
"self": 50.57828893099645,
"children": {
"TorchPPOOptimizer.update": {
"total": 102.65217173400356,
"count": 2292,
"is_parallel": true,
"self": 102.65217173400356
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15299650599990855,
"count": 1,
"self": 0.0013579679998656502,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1516385380000429,
"count": 1,
"self": 0.1516385380000429
}
}
}
}
}
}
}