{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.4903042316436768,
"min": 1.473644495010376,
"max": 2.88923978805542,
"count": 60
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7147.4990234375,
"min": 6569.8173828125,
"max": 19259.671875,
"count": 60
},
"SnowballTarget.Step.mean": {
"value": 299968.0,
"min": 4912.0,
"max": 299968.0,
"count": 60
},
"SnowballTarget.Step.sum": {
"value": 299968.0,
"min": 4912.0,
"max": 299968.0,
"count": 60
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 24.058250427246094,
"min": -0.018803970888257027,
"max": 24.058250427246094,
"count": 60
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1178.854248046875,
"min": -0.9025905728340149,
"max": 1217.2606201171875,
"count": 60
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 60
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 6567.0,
"min": 2189.0,
"max": 6567.0,
"count": 60
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.363636363636363,
"min": 2.9545454545454546,
"max": 26.363636363636363,
"count": 60
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 580.0,
"min": 65.0,
"max": 749.0,
"count": 60
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.363636363636363,
"min": 2.9545454545454546,
"max": 26.363636363636363,
"count": 60
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 580.0,
"min": 65.0,
"max": 749.0,
"count": 60
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0499648793535444,
"min": 0.037421126091623375,
"max": 0.061435123433372665,
"count": 60
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.0499648793535444,
"min": 0.037421126091623375,
"max": 0.10900242760647363,
"count": 60
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.24116564182674183,
"min": 0.0974211367614129,
"max": 0.3482391301323386,
"count": 60
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.24116564182674183,
"min": 0.0974211367614129,
"max": 0.6832874596118927,
"count": 60
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.667664000000039e-07,
"min": 2.667664000000039e-07,
"max": 9.853333480000002e-05,
"count": 60
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.667664000000039e-07,
"min": 2.667664000000039e-07,
"max": 0.00017506669160000004,
"count": 60
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.09999999999999998,
"min": 0.09999999999999998,
"max": 0.09999999999999998,
"count": 60
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.09999999999999998,
"min": 0.09999999999999998,
"max": 0.19999999999999996,
"count": 60
},
"SnowballTarget.Policy.Beta.mean": {
"value": 3.6640000000000395e-05,
"min": 3.6640000000000395e-05,
"max": 0.009853479999999998,
"count": 60
},
"SnowballTarget.Policy.Beta.sum": {
"value": 3.6640000000000395e-05,
"min": 3.6640000000000395e-05,
"max": 0.017509160000000003,
"count": 60
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 60
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 60
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1726660983",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1726661726"
},
"total": 742.5457266829999,
"count": 1,
"self": 0.4337463519996163,
"children": {
"run_training.setup": {
"total": 0.05375741100033338,
"count": 1,
"self": 0.05375741100033338
},
"TrainerController.start_learning": {
"total": 742.0582229199999,
"count": 1,
"self": 0.9639921740408681,
"children": {
"TrainerController._reset_env": {
"total": 2.87935927000035,
"count": 1,
"self": 2.87935927000035
},
"TrainerController.advance": {
"total": 738.0103127039583,
"count": 27414,
"self": 0.4029001189956034,
"children": {
"env_step": {
"total": 737.6074125849627,
"count": 27414,
"self": 399.4398875051984,
"children": {
"SubprocessEnvManager._take_step": {
"total": 337.7503058068478,
"count": 27414,
"self": 2.139963248897402,
"children": {
"TorchPolicy.evaluate": {
"total": 335.6103425579504,
"count": 27414,
"self": 335.6103425579504
}
}
},
"workers": {
"total": 0.4172192729165545,
"count": 27414,
"self": 0.0,
"children": {
"worker_root": {
"total": 740.0496240319417,
"count": 27414,
"is_parallel": true,
"self": 416.7165244518492,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002100170999256079,
"count": 1,
"is_parallel": true,
"self": 0.0006135749981694971,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001486596001086582,
"count": 10,
"is_parallel": true,
"self": 0.001486596001086582
}
}
},
"UnityEnvironment.step": {
"total": 0.06091328100046667,
"count": 1,
"is_parallel": true,
"self": 0.0005985839998174924,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039034100063872756,
"count": 1,
"is_parallel": true,
"self": 0.00039034100063872756
},
"communicator.exchange": {
"total": 0.0582315120000203,
"count": 1,
"is_parallel": true,
"self": 0.0582315120000203
},
"steps_from_proto": {
"total": 0.0016928439999901457,
"count": 1,
"is_parallel": true,
"self": 0.0003080729984503705,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013847710015397752,
"count": 10,
"is_parallel": true,
"self": 0.0013847710015397752
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 323.3330995800925,
"count": 27413,
"is_parallel": true,
"self": 14.863294503912584,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.755492722012605,
"count": 27413,
"is_parallel": true,
"self": 7.755492722012605
},
"communicator.exchange": {
"total": 251.88660386410993,
"count": 27413,
"is_parallel": true,
"self": 251.88660386410993
},
"steps_from_proto": {
"total": 48.827708490057375,
"count": 27413,
"is_parallel": true,
"self": 9.056327009237975,
"children": {
"_process_rank_one_or_two_observation": {
"total": 39.7713814808194,
"count": 274130,
"is_parallel": true,
"self": 39.7713814808194
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0008779700001468882,
"count": 1,
"self": 0.0008779700001468882,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 731.5485044326742,
"count": 857191,
"is_parallel": true,
"self": 17.148951226859936,
"children": {
"process_trajectory": {
"total": 338.38722694181615,
"count": 857191,
"is_parallel": true,
"self": 333.41943709581574,
"children": {
"RLTrainer._checkpoint": {
"total": 4.967789846000414,
"count": 12,
"is_parallel": true,
"self": 4.967789846000414
}
}
},
"_update_policy": {
"total": 376.01232626399815,
"count": 68,
"is_parallel": true,
"self": 146.37344319796193,
"children": {
"TorchPPOOptimizer.update": {
"total": 229.63888306603621,
"count": 5780,
"is_parallel": true,
"self": 229.63888306603621
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.20368080200023542,
"count": 1,
"self": 0.0023105770005713566,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20137022499966406,
"count": 1,
"self": 0.20137022499966406
}
}
}
}
}
}
}