{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.710561513900757,
"min": 2.710561513900757,
"max": 2.8506104946136475,
"count": 2
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 27878.125,
"min": 27878.125,
"max": 29255.81640625,
"count": 2
},
"SnowballTarget.Step.mean": {
"value": 19992.0,
"min": 9952.0,
"max": 19992.0,
"count": 2
},
"SnowballTarget.Step.sum": {
"value": 19992.0,
"min": 9952.0,
"max": 19992.0,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.535680890083313,
"min": 0.36864572763442993,
"max": 1.535680890083313,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 314.8145751953125,
"min": 71.51727294921875,
"max": 314.8145751953125,
"count": 2
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 2
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 2
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06610987260184416,
"min": 0.06405340450612808,
"max": 0.06610987260184416,
"count": 2
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3305493630092208,
"min": 0.2562136180245123,
"max": 0.3305493630092208,
"count": 2
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21005862042600035,
"min": 0.13078999917671158,
"max": 0.21005862042600035,
"count": 2
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.0502931021300017,
"min": 0.5231599967068463,
"max": 1.0502931021300017,
"count": 2
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0014999999999999998,
"min": 0.0012,
"max": 0.0014999999999999998,
"count": 2
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.19999999999999996,
"min": 0.19999999999999996,
"max": 0.19999999999999996,
"count": 2
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.9999999999999998,
"min": 0.7999999999999998,
"max": 0.9999999999999998,
"count": 2
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 2
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.025,
"min": 0.02,
"max": 0.025,
"count": 2
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 6.509090909090909,
"min": 3.7954545454545454,
"max": 6.509090909090909,
"count": 2
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 358.0,
"min": 167.0,
"max": 358.0,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 6.509090909090909,
"min": 3.7954545454545454,
"max": 6.509090909090909,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 358.0,
"min": 167.0,
"max": 358.0,
"count": 2
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696408985",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1696409041"
},
"total": 56.14561265699999,
"count": 1,
"self": 0.44773317099998167,
"children": {
"run_training.setup": {
"total": 0.06996562400001949,
"count": 1,
"self": 0.06996562400001949
},
"TrainerController.start_learning": {
"total": 55.627913861999986,
"count": 1,
"self": 0.09587171400002603,
"children": {
"TrainerController._reset_env": {
"total": 4.554128864000006,
"count": 1,
"self": 4.554128864000006
},
"TrainerController.advance": {
"total": 50.81839375599995,
"count": 1869,
"self": 0.03267294400075116,
"children": {
"env_step": {
"total": 50.7857208119992,
"count": 1869,
"self": 36.709884957000014,
"children": {
"SubprocessEnvManager._take_step": {
"total": 14.042961609999622,
"count": 1869,
"self": 0.17577429999954575,
"children": {
"TorchPolicy.evaluate": {
"total": 13.867187310000077,
"count": 1869,
"self": 13.867187310000077
}
}
},
"workers": {
"total": 0.0328742449995616,
"count": 1869,
"self": 0.0,
"children": {
"worker_root": {
"total": 55.33118334000005,
"count": 1869,
"is_parallel": true,
"self": 27.655750155000334,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0065718150000009246,
"count": 1,
"is_parallel": true,
"self": 0.004153140999960669,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002418674000040255,
"count": 10,
"is_parallel": true,
"self": 0.002418674000040255
}
}
},
"UnityEnvironment.step": {
"total": 0.03784793400001263,
"count": 1,
"is_parallel": true,
"self": 0.0006797019999851273,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00032222200002252066,
"count": 1,
"is_parallel": true,
"self": 0.00032222200002252066
},
"communicator.exchange": {
"total": 0.03365325900000471,
"count": 1,
"is_parallel": true,
"self": 0.03365325900000471
},
"steps_from_proto": {
"total": 0.0031927510000002712,
"count": 1,
"is_parallel": true,
"self": 0.00040913099994099866,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0027836200000592726,
"count": 10,
"is_parallel": true,
"self": 0.0027836200000592726
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 27.67543318499972,
"count": 1868,
"is_parallel": true,
"self": 1.1750034490006556,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.5764743549996751,
"count": 1868,
"is_parallel": true,
"self": 0.5764743549996751
},
"communicator.exchange": {
"total": 21.954991661999685,
"count": 1868,
"is_parallel": true,
"self": 21.954991661999685
},
"steps_from_proto": {
"total": 3.968963718999703,
"count": 1868,
"is_parallel": true,
"self": 0.7515924170013193,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3.2173713019983836,
"count": 18680,
"is_parallel": true,
"self": 3.2173713019983836
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00032362100000682403,
"count": 1,
"self": 0.00032362100000682403,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 50.43674295300315,
"count": 50266,
"is_parallel": true,
"self": 1.1173564080018252,
"children": {
"process_trajectory": {
"total": 28.359238668001353,
"count": 50266,
"is_parallel": true,
"self": 28.359238668001353
},
"_update_policy": {
"total": 20.96014787699997,
"count": 9,
"is_parallel": true,
"self": 7.089985407000086,
"children": {
"TorchPPOOptimizer.update": {
"total": 13.870162469999883,
"count": 456,
"is_parallel": true,
"self": 13.870162469999883
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1591959069999973,
"count": 1,
"self": 0.0008435199999894394,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15835238700000787,
"count": 1,
"self": 0.15835238700000787
}
}
}
}
}
}
}