{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0336956977844238,
"min": 1.0336956977844238,
"max": 2.876385450363159,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9881.0966796875,
"min": 9881.0966796875,
"max": 29551.984375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.26629638671875,
"min": 0.25742095708847046,
"max": 12.26629638671875,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2391.927734375,
"min": 49.939666748046875,
"max": 2489.2080078125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07425793066481585,
"min": 0.061571063889720126,
"max": 0.07425793066481585,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2970317226592634,
"min": 0.2623737781353103,
"max": 0.36180783712588177,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22475293994534248,
"min": 0.11227680672663173,
"max": 0.28229965626024733,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8990117597813699,
"min": 0.44910722690652694,
"max": 1.4114982813012367,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.113636363636363,
"min": 2.977272727272727,
"max": 24.34090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1061.0,
"min": 131.0,
"max": 1313.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.113636363636363,
"min": 2.977272727272727,
"max": 24.34090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1061.0,
"min": 131.0,
"max": 1313.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713695928",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713696490"
},
"total": 562.499700265,
"count": 1,
"self": 0.5963138259999141,
"children": {
"run_training.setup": {
"total": 0.06972452200000134,
"count": 1,
"self": 0.06972452200000134
},
"TrainerController.start_learning": {
"total": 561.8336619170001,
"count": 1,
"self": 0.7514540310090752,
"children": {
"TrainerController._reset_env": {
"total": 2.9765511180000317,
"count": 1,
"self": 2.9765511180000317
},
"TrainerController.advance": {
"total": 557.949218960991,
"count": 18207,
"self": 0.3914462650021733,
"children": {
"env_step": {
"total": 557.5577726959888,
"count": 18207,
"self": 430.7928103269917,
"children": {
"SubprocessEnvManager._take_step": {
"total": 126.35898243299897,
"count": 18207,
"self": 2.037915765990533,
"children": {
"TorchPolicy.evaluate": {
"total": 124.32106666700844,
"count": 18207,
"self": 124.32106666700844
}
}
},
"workers": {
"total": 0.40597993599811844,
"count": 18207,
"self": 0.0,
"children": {
"worker_root": {
"total": 560.0329550000029,
"count": 18207,
"is_parallel": true,
"self": 260.6208614900016,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008829442000035215,
"count": 1,
"is_parallel": true,
"self": 0.004044169000053444,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004785272999981771,
"count": 10,
"is_parallel": true,
"self": 0.004785272999981771
}
}
},
"UnityEnvironment.step": {
"total": 0.041242122999960884,
"count": 1,
"is_parallel": true,
"self": 0.0008294749999322448,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004758489999971971,
"count": 1,
"is_parallel": true,
"self": 0.0004758489999971971
},
"communicator.exchange": {
"total": 0.037597364000021116,
"count": 1,
"is_parallel": true,
"self": 0.037597364000021116
},
"steps_from_proto": {
"total": 0.002339435000010326,
"count": 1,
"is_parallel": true,
"self": 0.000576591000026383,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001762843999983943,
"count": 10,
"is_parallel": true,
"self": 0.001762843999983943
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 299.4120935100013,
"count": 18206,
"is_parallel": true,
"self": 14.113401544989472,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.517350790007981,
"count": 18206,
"is_parallel": true,
"self": 7.517350790007981
},
"communicator.exchange": {
"total": 234.28724535899357,
"count": 18206,
"is_parallel": true,
"self": 234.28724535899357
},
"steps_from_proto": {
"total": 43.49409581601026,
"count": 18206,
"is_parallel": true,
"self": 8.454345316027002,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.039750499983256,
"count": 182060,
"is_parallel": true,
"self": 35.039750499983256
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013622899996335036,
"count": 1,
"self": 0.00013622899996335036,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 551.3410951720213,
"count": 765715,
"is_parallel": true,
"self": 17.135888109047983,
"children": {
"process_trajectory": {
"total": 295.59925369297343,
"count": 765715,
"is_parallel": true,
"self": 294.7602604869734,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8389932060000547,
"count": 4,
"is_parallel": true,
"self": 0.8389932060000547
}
}
},
"_update_policy": {
"total": 238.60595336999984,
"count": 90,
"is_parallel": true,
"self": 63.001297269995234,
"children": {
"TorchPPOOptimizer.update": {
"total": 175.6046561000046,
"count": 4584,
"is_parallel": true,
"self": 175.6046561000046
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15630157800001143,
"count": 1,
"self": 0.0029596329998184956,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15334194500019294,
"count": 1,
"self": 0.15334194500019294
}
}
}
}
}
}
}