{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0754239559173584,
"min": 1.0754239559173584,
"max": 2.8491103649139404,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10315.466796875,
"min": 10315.466796875,
"max": 29177.73828125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.468709945678711,
"min": 0.3454476594924927,
"max": 12.468709945678711,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2431.3984375,
"min": 67.016845703125,
"max": 2526.640625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06978167955329648,
"min": 0.0634384319576152,
"max": 0.07572595935219878,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2791267182131859,
"min": 0.26032307795842413,
"max": 0.3546620882945146,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.216440100222826,
"min": 0.11593264300609007,
"max": 0.29806401297157886,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.865760400891304,
"min": 0.4637305720243603,
"max": 1.4903200648578943,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.545454545454547,
"min": 3.3181818181818183,
"max": 24.563636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1080.0,
"min": 146.0,
"max": 1351.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.545454545454547,
"min": 3.3181818181818183,
"max": 24.563636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1080.0,
"min": 146.0,
"max": 1351.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701173470",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701173973"
},
"total": 503.20595121299993,
"count": 1,
"self": 0.6281732980000925,
"children": {
"run_training.setup": {
"total": 0.056630026999982874,
"count": 1,
"self": 0.056630026999982874
},
"TrainerController.start_learning": {
"total": 502.52114788799986,
"count": 1,
"self": 0.597386738007117,
"children": {
"TrainerController._reset_env": {
"total": 3.2978635959999565,
"count": 1,
"self": 3.2978635959999565
},
"TrainerController.advance": {
"total": 498.4949427519929,
"count": 18208,
"self": 0.3166687919930382,
"children": {
"env_step": {
"total": 498.17827395999984,
"count": 18208,
"self": 338.1480770190067,
"children": {
"SubprocessEnvManager._take_step": {
"total": 159.72029375400155,
"count": 18208,
"self": 1.5391782729985835,
"children": {
"TorchPolicy.evaluate": {
"total": 158.18111548100296,
"count": 18208,
"self": 158.18111548100296
}
}
},
"workers": {
"total": 0.3099031869916189,
"count": 18208,
"self": 0.0,
"children": {
"worker_root": {
"total": 501.06138416400756,
"count": 18208,
"is_parallel": true,
"self": 245.36106247500572,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004889170999945236,
"count": 1,
"is_parallel": true,
"self": 0.0033845309998241646,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015046400001210714,
"count": 10,
"is_parallel": true,
"self": 0.0015046400001210714
}
}
},
"UnityEnvironment.step": {
"total": 0.059212819999970634,
"count": 1,
"is_parallel": true,
"self": 0.0006581659998801115,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00040952900008051074,
"count": 1,
"is_parallel": true,
"self": 0.00040952900008051074
},
"communicator.exchange": {
"total": 0.0560276740000063,
"count": 1,
"is_parallel": true,
"self": 0.0560276740000063
},
"steps_from_proto": {
"total": 0.0021174510000037117,
"count": 1,
"is_parallel": true,
"self": 0.0003833170000007158,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017341340000029959,
"count": 10,
"is_parallel": true,
"self": 0.0017341340000029959
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 255.70032168900184,
"count": 18207,
"is_parallel": true,
"self": 11.233851602008599,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.744747306997851,
"count": 18207,
"is_parallel": true,
"self": 5.744747306997851
},
"communicator.exchange": {
"total": 201.547408055005,
"count": 18207,
"is_parallel": true,
"self": 201.547408055005
},
"steps_from_proto": {
"total": 37.17431472499038,
"count": 18207,
"is_parallel": true,
"self": 7.089745190026747,
"children": {
"_process_rank_one_or_two_observation": {
"total": 30.084569534963634,
"count": 182070,
"is_parallel": true,
"self": 30.084569534963634
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00019443299993326946,
"count": 1,
"self": 0.00019443299993326946,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 494.043177466006,
"count": 534170,
"is_parallel": true,
"self": 11.943981519016802,
"children": {
"process_trajectory": {
"total": 280.7688851769891,
"count": 534170,
"is_parallel": true,
"self": 279.87450297098917,
"children": {
"RLTrainer._checkpoint": {
"total": 0.894382205999932,
"count": 4,
"is_parallel": true,
"self": 0.894382205999932
}
}
},
"_update_policy": {
"total": 201.3303107700001,
"count": 90,
"is_parallel": true,
"self": 63.39183269499654,
"children": {
"TorchPPOOptimizer.update": {
"total": 137.93847807500356,
"count": 4587,
"is_parallel": true,
"self": 137.93847807500356
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13076036899997234,
"count": 1,
"self": 0.0011830010000721813,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12957736799990016,
"count": 1,
"self": 0.12957736799990016
}
}
}
}
}
}
}