{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.4351891577243805,
"min": 0.4124315679073334,
"max": 2.8701202869415283,
"count": 200
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 4475.92041015625,
"min": 4037.46240234375,
"max": 29392.90234375,
"count": 200
},
"SnowballTarget.Step.mean": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Step.sum": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.28770923614502,
"min": 0.2245347946882248,
"max": 14.431157112121582,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2928.98046875,
"min": 43.559749603271484,
"max": 2952.845703125,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0699265562830203,
"min": 0.06112103356951824,
"max": 0.0775976367634333,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3496327814151015,
"min": 0.24448413427807297,
"max": 0.3879881838171665,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1627186337522432,
"min": 0.11146447093308193,
"max": 0.26447473488309803,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.813593168761216,
"min": 0.4458578837323277,
"max": 1.2789767162472594,
"count": 200
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.032997656000039e-07,
"min": 7.032997656000039e-07,
"max": 0.00029918820027059994,
"count": 200
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.5164988280000196e-06,
"min": 3.5164988280000196e-06,
"max": 0.0014885160038279998,
"count": 200
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10023440000000002,
"min": 0.10023440000000002,
"max": 0.1997294,
"count": 200
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5011720000000001,
"min": 0.4029176,
"max": 0.996172,
"count": 200
},
"SnowballTarget.Policy.Beta.mean": {
"value": 2.1696560000000067e-05,
"min": 2.1696560000000067e-05,
"max": 0.004986497059999999,
"count": 200
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00010848280000000034,
"min": 0.00010848280000000034,
"max": 0.024808982800000004,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.836363636363636,
"min": 3.2045454545454546,
"max": 28.59090909090909,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1531.0,
"min": 141.0,
"max": 1566.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.836363636363636,
"min": 3.2045454545454546,
"max": 28.59090909090909,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1531.0,
"min": 141.0,
"max": 1566.0,
"count": 200
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691753275",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691758072"
},
"total": 4797.353921215001,
"count": 1,
"self": 0.48705223700017086,
"children": {
"run_training.setup": {
"total": 0.0430377640000188,
"count": 1,
"self": 0.0430377640000188
},
"TrainerController.start_learning": {
"total": 4796.823831214,
"count": 1,
"self": 5.9265866639343585,
"children": {
"TrainerController._reset_env": {
"total": 4.720287638999935,
"count": 1,
"self": 4.720287638999935
},
"TrainerController.advance": {
"total": 4786.035077367066,
"count": 181872,
"self": 2.8701099820254967,
"children": {
"env_step": {
"total": 4783.164967385041,
"count": 181872,
"self": 3493.4310696112893,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1286.7908276269527,
"count": 181872,
"self": 18.777313051831698,
"children": {
"TorchPolicy.evaluate": {
"total": 1268.013514575121,
"count": 181872,
"self": 1268.013514575121
}
}
},
"workers": {
"total": 2.9430701467985045,
"count": 181872,
"self": 0.0,
"children": {
"worker_root": {
"total": 4781.1309507520855,
"count": 181872,
"is_parallel": true,
"self": 2244.634284836039,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.009122021000052882,
"count": 1,
"is_parallel": true,
"self": 0.007517592999988665,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016044280000642175,
"count": 10,
"is_parallel": true,
"self": 0.0016044280000642175
}
}
},
"UnityEnvironment.step": {
"total": 0.07782980299998599,
"count": 1,
"is_parallel": true,
"self": 0.0006065569999691434,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003981920000342143,
"count": 1,
"is_parallel": true,
"self": 0.0003981920000342143
},
"communicator.exchange": {
"total": 0.07463931899997078,
"count": 1,
"is_parallel": true,
"self": 0.07463931899997078
},
"steps_from_proto": {
"total": 0.002185735000011846,
"count": 1,
"is_parallel": true,
"self": 0.0003915009998536334,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017942340001582124,
"count": 10,
"is_parallel": true,
"self": 0.0017942340001582124
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2536.4966659160464,
"count": 181871,
"is_parallel": true,
"self": 107.16965771905052,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 53.56370950306939,
"count": 181871,
"is_parallel": true,
"self": 53.56370950306939
},
"communicator.exchange": {
"total": 2010.717184664041,
"count": 181871,
"is_parallel": true,
"self": 2010.717184664041
},
"steps_from_proto": {
"total": 365.0461140298853,
"count": 181871,
"is_parallel": true,
"self": 67.54184858460121,
"children": {
"_process_rank_one_or_two_observation": {
"total": 297.5042654452841,
"count": 1818710,
"is_parallel": true,
"self": 297.5042654452841
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0003264699998908327,
"count": 1,
"self": 0.0003264699998908327,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 4748.895687837884,
"count": 4393645,
"is_parallel": true,
"self": 96.52607736498157,
"children": {
"process_trajectory": {
"total": 2576.4395320578965,
"count": 4393645,
"is_parallel": true,
"self": 2561.9673742028976,
"children": {
"RLTrainer._checkpoint": {
"total": 14.472157854998727,
"count": 40,
"is_parallel": true,
"self": 14.472157854998727
}
}
},
"_update_policy": {
"total": 2075.9300784150055,
"count": 909,
"is_parallel": true,
"self": 824.9928528110536,
"children": {
"TorchPPOOptimizer.update": {
"total": 1250.937225603952,
"count": 46347,
"is_parallel": true,
"self": 1250.937225603952
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14155307399960293,
"count": 1,
"self": 0.0009410039992872044,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14061207000031573,
"count": 1,
"self": 0.14061207000031573
}
}
}
}
}
}
}
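
The object above is the gauges-plus-timer-tree summary that ML-Agents writes at the end of a run (the metadata block records the mlagents-learn invocation used here). Below is a minimal sketch of inspecting such a file with the Python standard library; the results/SnowballTarget1/run_logs/timers.json path is an assumption based on the run id in the metadata, so adjust it to wherever the file actually lives.

import json

# Assumed path: results/<run-id>/run_logs/timers.json (adjust as needed).
with open("results/SnowballTarget1/run_logs/timers.json") as f:
    data = json.load(f)

# "gauges" holds per-metric summaries: last value, min, max, and sample count.
reward = data["gauges"]["SnowballTarget.Environment.CumulativeReward.mean"]
print(f"final mean reward: {reward['value']:.2f} "
      f"(min {reward['min']:.2f}, max {reward['max']:.2f}, n={reward['count']})")

# The rest of the file is a nested timer tree: each node reports total wall-clock
# seconds, call count, time spent in the node itself ("self"), and child timers.
def walk(name, node, depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s "
          f"over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk(data.get("name", "root"), data)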