{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9588591456413269,
"min": 0.9588591456413269,
"max": 2.861081600189209,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9165.734375,
"min": 9165.734375,
"max": 29300.3359375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.622570991516113,
"min": 0.47253069281578064,
"max": 12.785687446594238,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2461.4013671875,
"min": 91.67095184326172,
"max": 2596.42724609375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06525593624260806,
"min": 0.06162248689817681,
"max": 0.07651369552146278,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26102374497043224,
"min": 0.24648994759270723,
"max": 0.35044862595660725,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20479879455239167,
"min": 0.12583700206596404,
"max": 0.2718899133743024,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8191951782095667,
"min": 0.5033480082638562,
"max": 1.359449566871512,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.181818181818183,
"min": 3.5681818181818183,
"max": 25.236363636363638,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1108.0,
"min": 157.0,
"max": 1388.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.181818181818183,
"min": 3.5681818181818183,
"max": 25.236363636363638,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1108.0,
"min": 157.0,
"max": 1388.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1715770830",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1715771462"
},
"total": 631.824444295,
"count": 1,
"self": 0.60803149700007,
"children": {
"run_training.setup": {
"total": 0.08130687700003136,
"count": 1,
"self": 0.08130687700003136
},
"TrainerController.start_learning": {
"total": 631.135105921,
"count": 1,
"self": 0.9356922090142916,
"children": {
"TrainerController._reset_env": {
"total": 3.2279529090000096,
"count": 1,
"self": 3.2279529090000096
},
"TrainerController.advance": {
"total": 626.8588180769858,
"count": 18200,
"self": 0.4552603219987077,
"children": {
"env_step": {
"total": 626.4035577549871,
"count": 18200,
"self": 485.2383844989616,
"children": {
"SubprocessEnvManager._take_step": {
"total": 140.71315892000717,
"count": 18200,
"self": 2.7572833920066273,
"children": {
"TorchPolicy.evaluate": {
"total": 137.95587552800055,
"count": 18200,
"self": 137.95587552800055
}
}
},
"workers": {
"total": 0.45201433601835106,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 628.8350588250061,
"count": 18200,
"is_parallel": true,
"self": 290.0869080960142,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008539799000004678,
"count": 1,
"is_parallel": true,
"self": 0.006596746000127496,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019430529998771817,
"count": 10,
"is_parallel": true,
"self": 0.0019430529998771817
}
}
},
"UnityEnvironment.step": {
"total": 0.051609858999995595,
"count": 1,
"is_parallel": true,
"self": 0.0009817709999992985,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005574159999923722,
"count": 1,
"is_parallel": true,
"self": 0.0005574159999923722
},
"communicator.exchange": {
"total": 0.04751542299999301,
"count": 1,
"is_parallel": true,
"self": 0.04751542299999301
},
"steps_from_proto": {
"total": 0.0025552490000109174,
"count": 1,
"is_parallel": true,
"self": 0.000615981000123611,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019392679998873064,
"count": 10,
"is_parallel": true,
"self": 0.0019392679998873064
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 338.74815072899185,
"count": 18199,
"is_parallel": true,
"self": 16.461354648018016,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.161687908985527,
"count": 18199,
"is_parallel": true,
"self": 8.161687908985527
},
"communicator.exchange": {
"total": 267.2559471679942,
"count": 18199,
"is_parallel": true,
"self": 267.2559471679942
},
"steps_from_proto": {
"total": 46.86916100399412,
"count": 18199,
"is_parallel": true,
"self": 9.669391974988741,
"children": {
"_process_rank_one_or_two_observation": {
"total": 37.19976902900538,
"count": 181990,
"is_parallel": true,
"self": 37.19976902900538
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00022013500006323738,
"count": 1,
"self": 0.00022013500006323738,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 619.73006950897,
"count": 793529,
"is_parallel": true,
"self": 18.413523699957295,
"children": {
"process_trajectory": {
"total": 345.82192560001187,
"count": 793529,
"is_parallel": true,
"self": 345.20606185901175,
"children": {
"RLTrainer._checkpoint": {
"total": 0.615863741000112,
"count": 4,
"is_parallel": true,
"self": 0.615863741000112
}
}
},
"_update_policy": {
"total": 255.4946202090009,
"count": 90,
"is_parallel": true,
"self": 66.74412219599986,
"children": {
"TorchPPOOptimizer.update": {
"total": 188.75049801300105,
"count": 4587,
"is_parallel": true,
"self": 188.75049801300105
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11242259099981311,
"count": 1,
"self": 0.0017075109999495908,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11071507999986352,
"count": 1,
"self": 0.11071507999986352
}
}
}
}
}
}
}