{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0199246406555176,
"min": 1.0199246406555176,
"max": 2.8538265228271484,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9738.240234375,
"min": 9738.240234375,
"max": 29194.64453125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.676046371459961,
"min": 0.14666564762592316,
"max": 12.676046371459961,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2471.8291015625,
"min": 28.453136444091797,
"max": 2577.89794921875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07431003886677881,
"min": 0.06301956523812431,
"max": 0.07431003886677881,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.29724015546711524,
"min": 0.2571182431477378,
"max": 0.36866475900009676,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22434938888923794,
"min": 0.11079428415042439,
"max": 0.28154752610945233,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8973975555569518,
"min": 0.44317713660169755,
"max": 1.4077376305472615,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.0,
"min": 3.272727272727273,
"max": 25.5,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1100.0,
"min": 144.0,
"max": 1381.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.0,
"min": 3.272727272727273,
"max": 25.5,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1100.0,
"min": 144.0,
"max": 1381.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686555606",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686556090"
},
"total": 483.81287726899996,
"count": 1,
"self": 0.4386677669999699,
"children": {
"run_training.setup": {
"total": 0.04113501400001951,
"count": 1,
"self": 0.04113501400001951
},
"TrainerController.start_learning": {
"total": 483.33307448799997,
"count": 1,
"self": 0.5937868630068124,
"children": {
"TrainerController._reset_env": {
"total": 4.81646604499997,
"count": 1,
"self": 4.81646604499997
},
"TrainerController.advance": {
"total": 477.7825102999931,
"count": 18202,
"self": 0.29438425599346374,
"children": {
"env_step": {
"total": 477.4881260439996,
"count": 18202,
"self": 347.48027918000446,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.69838081100067,
"count": 18202,
"self": 1.8592176650066676,
"children": {
"TorchPolicy.evaluate": {
"total": 127.83916314599401,
"count": 18202,
"self": 127.83916314599401
}
}
},
"workers": {
"total": 0.3094660529944804,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 481.6222364020025,
"count": 18202,
"is_parallel": true,
"self": 224.50564655100698,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005176987000027111,
"count": 1,
"is_parallel": true,
"self": 0.003490162999923996,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016868240001031154,
"count": 10,
"is_parallel": true,
"self": 0.0016868240001031154
}
}
},
"UnityEnvironment.step": {
"total": 0.09202599800005373,
"count": 1,
"is_parallel": true,
"self": 0.0006330470000648347,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003790240000398626,
"count": 1,
"is_parallel": true,
"self": 0.0003790240000398626
},
"communicator.exchange": {
"total": 0.08880063599997357,
"count": 1,
"is_parallel": true,
"self": 0.08880063599997357
},
"steps_from_proto": {
"total": 0.00221329099997547,
"count": 1,
"is_parallel": true,
"self": 0.0003932209998538383,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001820070000121632,
"count": 10,
"is_parallel": true,
"self": 0.001820070000121632
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 257.1165898509955,
"count": 18201,
"is_parallel": true,
"self": 10.663191966998284,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.412404158001777,
"count": 18201,
"is_parallel": true,
"self": 5.412404158001777
},
"communicator.exchange": {
"total": 205.35848143399153,
"count": 18201,
"is_parallel": true,
"self": 205.35848143399153
},
"steps_from_proto": {
"total": 35.682512292003935,
"count": 18201,
"is_parallel": true,
"self": 6.611146507061562,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.071365784942373,
"count": 182010,
"is_parallel": true,
"self": 29.071365784942373
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00016681300007803657,
"count": 1,
"self": 0.00016681300007803657,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 474.19935985897314,
"count": 468533,
"is_parallel": true,
"self": 9.869260101922237,
"children": {
"process_trajectory": {
"total": 260.993350323051,
"count": 468533,
"is_parallel": true,
"self": 260.16897806805105,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8243722549999575,
"count": 4,
"is_parallel": true,
"self": 0.8243722549999575
}
}
},
"_update_policy": {
"total": 203.3367494339999,
"count": 90,
"is_parallel": true,
"self": 76.01625816000444,
"children": {
"TorchPPOOptimizer.update": {
"total": 127.32049127399546,
"count": 4587,
"is_parallel": true,
"self": 127.32049127399546
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14014446700002736,
"count": 1,
"self": 0.0008987769999748707,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1392456900000525,
"count": 1,
"self": 0.1392456900000525
}
}
}
}
}
}
}