{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8547739386558533,
"min": 0.8547739386558533,
"max": 2.8685977458953857,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8161.3818359375,
"min": 8161.3818359375,
"max": 29377.30859375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.78675365447998,
"min": 0.4261854887008667,
"max": 12.837603569030762,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2493.4169921875,
"min": 82.67998504638672,
"max": 2618.87109375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06490979780096048,
"min": 0.06325686523303055,
"max": 0.07351337247439683,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25963919120384193,
"min": 0.25963919120384193,
"max": 0.35724708498409935,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2374150609853221,
"min": 0.11176768867481573,
"max": 0.2879920241733392,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9496602439412885,
"min": 0.44707075469926294,
"max": 1.4349613575374378,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.181818181818183,
"min": 3.272727272727273,
"max": 25.318181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1108.0,
"min": 144.0,
"max": 1392.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.181818181818183,
"min": 3.272727272727273,
"max": 25.318181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1108.0,
"min": 144.0,
"max": 1392.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1722338306",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1722338741"
},
"total": 434.47053340599996,
"count": 1,
"self": 0.48502582300000086,
"children": {
"run_training.setup": {
"total": 0.05951098600007754,
"count": 1,
"self": 0.05951098600007754
},
"TrainerController.start_learning": {
"total": 433.9259965969999,
"count": 1,
"self": 0.5485184460056871,
"children": {
"TrainerController._reset_env": {
"total": 2.799819007999986,
"count": 1,
"self": 2.799819007999986
},
"TrainerController.advance": {
"total": 430.4839174559943,
"count": 18201,
"self": 0.2502454129951275,
"children": {
"env_step": {
"total": 430.23367204299916,
"count": 18201,
"self": 278.8416262769849,
"children": {
"SubprocessEnvManager._take_step": {
"total": 151.12900241800685,
"count": 18201,
"self": 1.4418840680170888,
"children": {
"TorchPolicy.evaluate": {
"total": 149.68711834998976,
"count": 18201,
"self": 149.68711834998976
}
}
},
"workers": {
"total": 0.26304334800738616,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 432.78862902099775,
"count": 18201,
"is_parallel": true,
"self": 219.7176693360069,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004931026000008387,
"count": 1,
"is_parallel": true,
"self": 0.003407138999818926,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001523887000189461,
"count": 10,
"is_parallel": true,
"self": 0.001523887000189461
}
}
},
"UnityEnvironment.step": {
"total": 0.04270178199999464,
"count": 1,
"is_parallel": true,
"self": 0.0007602549999319308,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00040100100000017846,
"count": 1,
"is_parallel": true,
"self": 0.00040100100000017846
},
"communicator.exchange": {
"total": 0.039627111000072546,
"count": 1,
"is_parallel": true,
"self": 0.039627111000072546
},
"steps_from_proto": {
"total": 0.0019134149999899819,
"count": 1,
"is_parallel": true,
"self": 0.0003583770001114317,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015550379998785502,
"count": 10,
"is_parallel": true,
"self": 0.0015550379998785502
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 213.07095968499084,
"count": 18200,
"is_parallel": true,
"self": 9.897985111961816,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.1010335380013885,
"count": 18200,
"is_parallel": true,
"self": 5.1010335380013885
},
"communicator.exchange": {
"total": 166.53734032102238,
"count": 18200,
"is_parallel": true,
"self": 166.53734032102238
},
"steps_from_proto": {
"total": 31.53460071400525,
"count": 18200,
"is_parallel": true,
"self": 5.880322015031197,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.654278698974053,
"count": 182000,
"is_parallel": true,
"self": 25.654278698974053
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00012245599987181777,
"count": 1,
"self": 0.00012245599987181777,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 425.2050047729397,
"count": 651118,
"is_parallel": true,
"self": 13.297977464920791,
"children": {
"process_trajectory": {
"total": 235.17596657801903,
"count": 651118,
"is_parallel": true,
"self": 234.5065855250191,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6693810529999382,
"count": 4,
"is_parallel": true,
"self": 0.6693810529999382
}
}
},
"_update_policy": {
"total": 176.73106072999985,
"count": 90,
"is_parallel": true,
"self": 56.15382869399218,
"children": {
"TorchPPOOptimizer.update": {
"total": 120.57723203600767,
"count": 4587,
"is_parallel": true,
"self": 120.57723203600767
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09361923100004788,
"count": 1,
"self": 0.0009318620002431999,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09268736899980468,
"count": 1,
"self": 0.09268736899980468
}
}
}
}
}
}
}