{ "name": "root", "gauges": { "SnowballTarget.Policy.Entropy.mean": { "value": 0.8856061697006226, "min": 0.8785231709480286, "max": 2.8686530590057373, "count": 20 }, "SnowballTarget.Policy.Entropy.sum": { "value": 8475.2509765625, "min": 8475.2509765625, "max": 29377.875, "count": 20 }, "SnowballTarget.Step.mean": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Step.sum": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": { "value": 12.59672737121582, "min": 0.2983435094356537, "max": 12.755240440368652, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": { "value": 2456.36181640625, "min": 57.87864303588867, "max": 2589.71728515625, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.mean": { "value": 199.0, "min": 199.0, "max": 199.0, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.sum": { "value": 8756.0, "min": 8756.0, "max": 10945.0, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.mean": { "value": 0.06484215036428341, "min": 0.06481837471345024, "max": 0.07627394494021941, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.sum": { "value": 0.25936860145713364, "min": 0.25927349885380097, "max": 0.36200790193534504, "count": 20 }, "SnowballTarget.Losses.ValueLoss.mean": { "value": 0.20339418860042796, "min": 0.11605569468948115, "max": 0.26271184986712887, "count": 20 }, "SnowballTarget.Losses.ValueLoss.sum": { "value": 0.8135767544017118, "min": 0.4642227787579246, "max": 1.3135592493356443, "count": 20 }, "SnowballTarget.Policy.LearningRate.mean": { "value": 8.082097306000005e-06, "min": 8.082097306000005e-06, "max": 0.000291882002706, "count": 20 }, "SnowballTarget.Policy.LearningRate.sum": { "value": 3.232838922400002e-05, "min": 3.232838922400002e-05, "max": 0.00138516003828, "count": 20 }, "SnowballTarget.Policy.Epsilon.mean": { "value": 0.10269400000000001, "min": 0.10269400000000001, "max": 0.19729400000000002, "count": 20 }, "SnowballTarget.Policy.Epsilon.sum": { "value": 0.41077600000000003, "min": 0.41077600000000003, "max": 0.96172, "count": 20 }, "SnowballTarget.Policy.Beta.mean": { "value": 0.0001444306000000001, "min": 0.0001444306000000001, "max": 0.0048649706, "count": 20 }, "SnowballTarget.Policy.Beta.sum": { "value": 0.0005777224000000004, "min": 0.0005777224000000004, "max": 0.023089828, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.mean": { "value": 24.818181818181817, "min": 3.477272727272727, "max": 25.09090909090909, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.sum": { "value": 1092.0, "min": 153.0, "max": 1371.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.mean": { "value": 24.818181818181817, "min": 3.477272727272727, "max": 25.09090909090909, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.sum": { "value": 1092.0, "min": 153.0, "max": 1371.0, "count": 20 }, "SnowballTarget.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 }, "SnowballTarget.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1701655278", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", 
"communication_protocol_version": "1.5.0", "pytorch_version": "2.1.1+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1701655742" }, "total": 463.6502280390001, "count": 1, "self": 0.6697580100002369, "children": { "run_training.setup": { "total": 0.07771567899999354, "count": 1, "self": 0.07771567899999354 }, "TrainerController.start_learning": { "total": 462.9027543499999, "count": 1, "self": 0.518994786000917, "children": { "TrainerController._reset_env": { "total": 3.4131496290000314, "count": 1, "self": 3.4131496290000314 }, "TrainerController.advance": { "total": 458.884180524999, "count": 18201, "self": 0.24944113998787998, "children": { "env_step": { "total": 458.63473938501113, "count": 18201, "self": 310.7679812750175, "children": { "SubprocessEnvManager._take_step": { "total": 147.59826931899988, "count": 18201, "self": 1.402091322000956, "children": { "TorchPolicy.evaluate": { "total": 146.19617799699893, "count": 18201, "self": 146.19617799699893 } } }, "workers": { "total": 0.2684887909937288, "count": 18201, "self": 0.0, "children": { "worker_root": { "total": 461.67263348701874, "count": 18201, "is_parallel": true, "self": 228.5188735730129, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.005968991000031565, "count": 1, "is_parallel": true, "self": 0.004468811000037931, "children": { "_process_rank_one_or_two_observation": { "total": 0.0015001799999936338, "count": 10, "is_parallel": true, "self": 0.0015001799999936338 } } }, "UnityEnvironment.step": { "total": 0.046656702999939625, "count": 1, "is_parallel": true, "self": 0.0006491869999081246, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0003711240000257021, "count": 1, "is_parallel": true, "self": 0.0003711240000257021 }, "communicator.exchange": { "total": 0.043628388000001905, "count": 1, "is_parallel": true, "self": 0.043628388000001905 }, "steps_from_proto": { "total": 0.0020080040000038935, "count": 1, "is_parallel": true, "self": 0.00036630499982948095, "children": { "_process_rank_one_or_two_observation": { "total": 0.0016416990001744125, "count": 10, "is_parallel": true, "self": 0.0016416990001744125 } } } } } } }, "UnityEnvironment.step": { "total": 233.15375991400583, "count": 18200, "is_parallel": true, "self": 10.542252505960732, "children": { "UnityEnvironment._generate_step_input": { "total": 5.23215092203111, "count": 18200, "is_parallel": true, "self": 5.23215092203111 }, "communicator.exchange": { "total": 184.41406990400242, "count": 18200, "is_parallel": true, "self": 184.41406990400242 }, "steps_from_proto": { "total": 32.96528658201157, "count": 18200, "is_parallel": true, "self": 6.181303478054133, "children": { "_process_rank_one_or_two_observation": { "total": 26.78398310395744, "count": 182000, "is_parallel": true, "self": 26.78398310395744 } } } } } } } } } } } } }, "trainer_threads": { "total": 0.00011420900000302936, "count": 1, "self": 0.00011420900000302936, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 454.9316594960094, "count": 492050, "is_parallel": true, "self": 10.534527471932051, "children": { "process_trajectory": { "total": 257.5263902000777, "count": 492050, "is_parallel": true, "self": 256.5625163030775, "children": { "RLTrainer._checkpoint": { "total": 0.9638738970002123, "count": 4, "is_parallel": true, "self": 0.9638738970002123 } } }, "_update_policy": { "total": 
186.87074182399965, "count": 90, "is_parallel": true, "self": 57.56150328500212, "children": { "TorchPPOOptimizer.update": { "total": 129.30923853899753, "count": 4587, "is_parallel": true, "self": 129.30923853899753 } } } } } } } } }, "TrainerController._save_models": { "total": 0.08631520099993395, "count": 1, "self": 0.0009646979999615724, "children": { "RLTrainer._checkpoint": { "total": 0.08535050299997238, "count": 1, "self": 0.08535050299997238 } } } } } } }