{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0194075107574463,
"min": 1.0194075107574463,
"max": 2.877188205718994,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9789.3701171875,
"min": 9789.3701171875,
"max": 29496.93359375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.541040420532227,
"min": 0.4259399473667145,
"max": 11.547035217285156,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2250.5029296875,
"min": 82.6323471069336,
"max": 2355.59521484375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0707222411478784,
"min": 0.06511370453575842,
"max": 0.0742333448787594,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2828889645915136,
"min": 0.2604548181430337,
"max": 0.37116672439379705,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22258083799890443,
"min": 0.11022809994927443,
"max": 0.27715267387090947,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8903233519956177,
"min": 0.44091239979709773,
"max": 1.3857633693545472,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 22.795454545454547,
"min": 3.1818181818181817,
"max": 22.977272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1003.0,
"min": 140.0,
"max": 1260.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 22.795454545454547,
"min": 3.1818181818181817,
"max": 22.977272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1003.0,
"min": 140.0,
"max": 1260.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690123725",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690124212"
},
"total": 486.606115059,
"count": 1,
"self": 0.8454966439999794,
"children": {
"run_training.setup": {
"total": 0.0356812499999819,
"count": 1,
"self": 0.0356812499999819
},
"TrainerController.start_learning": {
"total": 485.72493716500003,
"count": 1,
"self": 0.5961711770058855,
"children": {
"TrainerController._reset_env": {
"total": 5.643844839999986,
"count": 1,
"self": 5.643844839999986
},
"TrainerController.advance": {
"total": 479.23868350799427,
"count": 18219,
"self": 0.2788696469976344,
"children": {
"env_step": {
"total": 478.95981386099663,
"count": 18219,
"self": 347.64661302601434,
"children": {
"SubprocessEnvManager._take_step": {
"total": 131.01273179199416,
"count": 18219,
"self": 1.848050542998351,
"children": {
"TorchPolicy.evaluate": {
"total": 129.1646812489958,
"count": 18219,
"self": 129.1646812489958
}
}
},
"workers": {
"total": 0.3004690429881407,
"count": 18219,
"self": 0.0,
"children": {
"worker_root": {
"total": 483.98777965299587,
"count": 18219,
"is_parallel": true,
"self": 229.08544470999334,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0058011190000115676,
"count": 1,
"is_parallel": true,
"self": 0.004266357999995307,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015347610000162604,
"count": 10,
"is_parallel": true,
"self": 0.0015347610000162604
}
}
},
"UnityEnvironment.step": {
"total": 0.049313252999979795,
"count": 1,
"is_parallel": true,
"self": 0.0007503939999651266,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042087200000651137,
"count": 1,
"is_parallel": true,
"self": 0.00042087200000651137
},
"communicator.exchange": {
"total": 0.04561193000000685,
"count": 1,
"is_parallel": true,
"self": 0.04561193000000685
},
"steps_from_proto": {
"total": 0.0025300570000013067,
"count": 1,
"is_parallel": true,
"self": 0.0007084729998609873,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018215840001403194,
"count": 10,
"is_parallel": true,
"self": 0.0018215840001403194
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 254.90233494300253,
"count": 18218,
"is_parallel": true,
"self": 10.688816325993514,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.424652592003895,
"count": 18218,
"is_parallel": true,
"self": 5.424652592003895
},
"communicator.exchange": {
"total": 203.14122218900815,
"count": 18218,
"is_parallel": true,
"self": 203.14122218900815
},
"steps_from_proto": {
"total": 35.64764383599697,
"count": 18218,
"is_parallel": true,
"self": 6.608879299963576,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.038764536033398,
"count": 182180,
"is_parallel": true,
"self": 29.038764536033398
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.955099994840566e-05,
"count": 1,
"self": 9.955099994840566e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 475.48991726804206,
"count": 451656,
"is_parallel": true,
"self": 10.384681249035737,
"children": {
"process_trajectory": {
"total": 259.83223405600666,
"count": 451656,
"is_parallel": true,
"self": 258.6741676330065,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1580664230001503,
"count": 4,
"is_parallel": true,
"self": 1.1580664230001503
}
}
},
"_update_policy": {
"total": 205.27300196299967,
"count": 90,
"is_parallel": true,
"self": 80.74371942899796,
"children": {
"TorchPPOOptimizer.update": {
"total": 124.52928253400171,
"count": 4587,
"is_parallel": true,
"self": 124.52928253400171
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.24613808899994183,
"count": 1,
"self": 0.001254677999895648,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24488341100004618,
"count": 1,
"self": 0.24488341100004618
}
}
}
}
}
}
}