{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.3329293727874756,
"min": 1.3329293727874756,
"max": 2.88082218170166,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 12741.4716796875,
"min": 12741.4716796875,
"max": 29534.189453125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.384367942810059,
"min": 0.14589832723140717,
"max": 11.384367942810059,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2219.95166015625,
"min": 28.304275512695312,
"max": 2257.34765625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.431818181818183,
"min": 2.772727272727273,
"max": 23.818181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1031.0,
"min": 122.0,
"max": 1310.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.431818181818183,
"min": 2.772727272727273,
"max": 23.818181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1031.0,
"min": 122.0,
"max": 1310.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04093629062525924,
"min": 0.04093629062525924,
"max": 0.05573761350288074,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.08187258125051848,
"min": 0.08187258125051848,
"max": 0.15768451145484896,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2171214446425438,
"min": 0.10377455410985824,
"max": 0.29310326570389317,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.4342428892850876,
"min": 0.20754910821971648,
"max": 0.8789571047413582,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.0720097856000005e-05,
"min": 1.0720097856000005e-05,
"max": 0.000483720003256,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.144019571200001e-05,
"min": 2.144019571200001e-05,
"max": 0.0012366600526679998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.102144,
"min": 0.102144,
"max": 0.196744,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.204288,
"min": 0.204288,
"max": 0.5473320000000002,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00011698560000000007,
"min": 0.00011698560000000007,
"max": 0.0048375256,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00023397120000000015,
"min": 0.00023397120000000015,
"max": 0.012371866800000002,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704587125",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704587554"
},
"total": 429.6008158349998,
"count": 1,
"self": 0.43890717999966,
"children": {
"run_training.setup": {
"total": 0.05522954399998525,
"count": 1,
"self": 0.05522954399998525
},
"TrainerController.start_learning": {
"total": 429.1066791110002,
"count": 1,
"self": 0.5772311950070161,
"children": {
"TrainerController._reset_env": {
"total": 3.3237178720000884,
"count": 1,
"self": 3.3237178720000884
},
"TrainerController.advance": {
"total": 425.11653304299307,
"count": 18200,
"self": 0.2563298209684035,
"children": {
"env_step": {
"total": 424.86020322202467,
"count": 18200,
"self": 282.17213610201793,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.40937749999694,
"count": 18200,
"self": 1.3907866299757643,
"children": {
"TorchPolicy.evaluate": {
"total": 141.01859087002117,
"count": 18200,
"self": 141.01859087002117
}
}
},
"workers": {
"total": 0.2786896200097999,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 427.982290600012,
"count": 18200,
"is_parallel": true,
"self": 209.3083058220052,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0051314699999238655,
"count": 1,
"is_parallel": true,
"self": 0.003852289999485947,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012791800004379184,
"count": 10,
"is_parallel": true,
"self": 0.0012791800004379184
}
}
},
"UnityEnvironment.step": {
"total": 0.037507637999851795,
"count": 1,
"is_parallel": true,
"self": 0.0006734529999903316,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000390698000046541,
"count": 1,
"is_parallel": true,
"self": 0.000390698000046541
},
"communicator.exchange": {
"total": 0.03459158199984813,
"count": 1,
"is_parallel": true,
"self": 0.03459158199984813
},
"steps_from_proto": {
"total": 0.0018519049999667914,
"count": 1,
"is_parallel": true,
"self": 0.00036054900010640267,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014913559998603887,
"count": 10,
"is_parallel": true,
"self": 0.0014913559998603887
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 218.6739847780068,
"count": 18199,
"is_parallel": true,
"self": 10.499413984015973,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.261026000003312,
"count": 18199,
"is_parallel": true,
"self": 5.261026000003312
},
"communicator.exchange": {
"total": 170.27880161399048,
"count": 18199,
"is_parallel": true,
"self": 170.27880161399048
},
"steps_from_proto": {
"total": 32.63474317999703,
"count": 18199,
"is_parallel": true,
"self": 5.9853682760285665,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.649374903968464,
"count": 181990,
"is_parallel": true,
"self": 26.649374903968464
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001599960000930878,
"count": 1,
"self": 0.0001599960000930878,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 417.8603841170309,
"count": 855215,
"is_parallel": true,
"self": 17.674628003091357,
"children": {
"process_trajectory": {
"total": 287.63484019894054,
"count": 855215,
"is_parallel": true,
"self": 286.7791767329404,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8556634660001237,
"count": 4,
"is_parallel": true,
"self": 0.8556634660001237
}
}
},
"_update_policy": {
"total": 112.550915914999,
"count": 45,
"is_parallel": true,
"self": 50.37968113200509,
"children": {
"TorchPPOOptimizer.update": {
"total": 62.171234782993906,
"count": 2292,
"is_parallel": true,
"self": 62.171234782993906
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08903700499990919,
"count": 1,
"self": 0.0010539339998558717,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08798307100005331,
"count": 1,
"self": 0.08798307100005331
}
}
}
}
}
}
}