{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6090871095657349,
"min": 0.6090871095657349,
"max": 2.8816184997558594,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5936.1630859375,
"min": 5872.9140625,
"max": 29605.748046875,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.42320728302002,
"min": 0.30475538969039917,
"max": 13.51329517364502,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2751.757568359375,
"min": 59.1225471496582,
"max": 2753.166015625,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0329300686561813,
"min": 0.023213192794355568,
"max": 0.03723663099784365,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.16465034328090647,
"min": 0.09531767162358544,
"max": 0.1811372876400128,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19858644778529802,
"min": 0.11344349601616463,
"max": 0.33856918041904766,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9929322389264901,
"min": 0.4537739840646585,
"max": 1.6881386327246826,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.0528989824000023e-06,
"min": 3.0528989824000023e-06,
"max": 0.00029675280108239997,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.526449491200001e-05,
"min": 1.526449491200001e-05,
"max": 0.001454064015312,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891760000000003,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.4119904000000001,
"max": 0.9846880000000001,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.077824000000005e-05,
"min": 6.077824000000005e-05,
"max": 0.004945988240000001,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00030389120000000027,
"min": 0.00030389120000000027,
"max": 0.024235931200000005,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.6,
"min": 2.8636363636363638,
"max": 26.8,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1463.0,
"min": 126.0,
"max": 1474.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.6,
"min": 2.8636363636363638,
"max": 26.8,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1463.0,
"min": 126.0,
"max": 1474.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717682597",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1717684013"
},
"total": 1415.6463985999999,
"count": 1,
"self": 0.5443490200002543,
"children": {
"run_training.setup": {
"total": 0.0741665099999409,
"count": 1,
"self": 0.0741665099999409
},
"TrainerController.start_learning": {
"total": 1415.0278830699997,
"count": 1,
"self": 2.0964214500070284,
"children": {
"TrainerController._reset_env": {
"total": 4.232486793999897,
"count": 1,
"self": 4.232486793999897
},
"TrainerController.advance": {
"total": 1408.5813485529932,
"count": 45486,
"self": 1.130231669998011,
"children": {
"env_step": {
"total": 1407.4511168829952,
"count": 45486,
"self": 1030.719085756912,
"children": {
"SubprocessEnvManager._take_step": {
"total": 375.637633618016,
"count": 45486,
"self": 6.536876083982861,
"children": {
"TorchPolicy.evaluate": {
"total": 369.10075753403316,
"count": 45486,
"self": 369.10075753403316
}
}
},
"workers": {
"total": 1.0943975080672317,
"count": 45486,
"self": 0.0,
"children": {
"worker_root": {
"total": 1409.8663807310159,
"count": 45486,
"is_parallel": true,
"self": 607.53139942806,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007767209999883562,
"count": 1,
"is_parallel": true,
"self": 0.005305325999870547,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002461884000013015,
"count": 10,
"is_parallel": true,
"self": 0.002461884000013015
}
}
},
"UnityEnvironment.step": {
"total": 0.10888023600000452,
"count": 1,
"is_parallel": true,
"self": 0.002716714000371212,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004594079998696543,
"count": 1,
"is_parallel": true,
"self": 0.0004594079998696543
},
"communicator.exchange": {
"total": 0.09634289699988585,
"count": 1,
"is_parallel": true,
"self": 0.09634289699988585
},
"steps_from_proto": {
"total": 0.009361216999877797,
"count": 1,
"is_parallel": true,
"self": 0.0005615529996703117,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.008799664000207486,
"count": 10,
"is_parallel": true,
"self": 0.008799664000207486
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 802.3349813029558,
"count": 45485,
"is_parallel": true,
"self": 38.60814881793067,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 19.377303806010104,
"count": 45485,
"is_parallel": true,
"self": 19.377303806010104
},
"communicator.exchange": {
"total": 631.7321649120188,
"count": 45485,
"is_parallel": true,
"self": 631.7321649120188
},
"steps_from_proto": {
"total": 112.61736376699628,
"count": 45485,
"is_parallel": true,
"self": 22.2493103680074,
"children": {
"_process_rank_one_or_two_observation": {
"total": 90.36805339898888,
"count": 454850,
"is_parallel": true,
"self": 90.36805339898888
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.000204699999812874,
"count": 1,
"self": 0.000204699999812874,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1391.5981218800764,
"count": 1859009,
"is_parallel": true,
"self": 44.784938175028174,
"children": {
"process_trajectory": {
"total": 811.3936514300483,
"count": 1859009,
"is_parallel": true,
"self": 808.4940429800481,
"children": {
"RLTrainer._checkpoint": {
"total": 2.899608450000187,
"count": 10,
"is_parallel": true,
"self": 2.899608450000187
}
}
},
"_update_policy": {
"total": 535.4195322749999,
"count": 227,
"is_parallel": true,
"self": 195.25170760899277,
"children": {
"TorchPPOOptimizer.update": {
"total": 340.16782466600716,
"count": 2724,
"is_parallel": true,
"self": 340.16782466600716
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11742157299977407,
"count": 1,
"self": 0.0015210199999273755,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1159005529998467,
"count": 1,
"self": 0.1159005529998467
}
}
}
}
}
}
}