{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0283766984939575,
"min": 1.0283766984939575,
"max": 2.868455410003662,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9841.5654296875,
"min": 9841.5654296875,
"max": 29438.95703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.934687614440918,
"min": 0.41554394364356995,
"max": 12.95661449432373,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2522.26416015625,
"min": 80.61552429199219,
"max": 2643.1494140625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06932028354906514,
"min": 0.0623955137408641,
"max": 0.07233752891211313,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27728113419626055,
"min": 0.2495820549634564,
"max": 0.3616876445605657,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18596041290199056,
"min": 0.14421047310929233,
"max": 0.2808243039191938,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7438416516079622,
"min": 0.5768418924371693,
"max": 1.346978288070828,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.704545454545453,
"min": 3.6818181818181817,
"max": 25.954545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1131.0,
"min": 162.0,
"max": 1401.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.704545454545453,
"min": 3.6818181818181817,
"max": 25.954545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1131.0,
"min": 162.0,
"max": 1401.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702956224",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702956922"
},
"total": 698.471670775,
"count": 1,
"self": 0.9695368379999536,
"children": {
"run_training.setup": {
"total": 0.08324628299999404,
"count": 1,
"self": 0.08324628299999404
},
"TrainerController.start_learning": {
"total": 697.4188876540001,
"count": 1,
"self": 1.029985272011686,
"children": {
"TrainerController._reset_env": {
"total": 4.753415781999934,
"count": 1,
"self": 4.753415781999934
},
"TrainerController.advance": {
"total": 691.4698217769884,
"count": 18202,
"self": 0.5275586039834934,
"children": {
"env_step": {
"total": 690.9422631730049,
"count": 18202,
"self": 541.829210780002,
"children": {
"SubprocessEnvManager._take_step": {
"total": 148.60391728900163,
"count": 18202,
"self": 3.1537491299941394,
"children": {
"TorchPolicy.evaluate": {
"total": 145.4501681590075,
"count": 18202,
"self": 145.4501681590075
}
}
},
"workers": {
"total": 0.5091351040013024,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 694.6566163799984,
"count": 18202,
"is_parallel": true,
"self": 315.3600288680168,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007991138999955183,
"count": 1,
"is_parallel": true,
"self": 0.005821822999678261,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002169316000276922,
"count": 10,
"is_parallel": true,
"self": 0.002169316000276922
}
}
},
"UnityEnvironment.step": {
"total": 0.052060180000012224,
"count": 1,
"is_parallel": true,
"self": 0.0009062850001555489,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004894979999789939,
"count": 1,
"is_parallel": true,
"self": 0.0004894979999789939
},
"communicator.exchange": {
"total": 0.04813185499995143,
"count": 1,
"is_parallel": true,
"self": 0.04813185499995143
},
"steps_from_proto": {
"total": 0.002532541999926252,
"count": 1,
"is_parallel": true,
"self": 0.0005168640000192681,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002015677999906984,
"count": 10,
"is_parallel": true,
"self": 0.002015677999906984
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 379.29658751198156,
"count": 18201,
"is_parallel": true,
"self": 17.9540959859811,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.682026031001442,
"count": 18201,
"is_parallel": true,
"self": 8.682026031001442
},
"communicator.exchange": {
"total": 300.90285196200716,
"count": 18201,
"is_parallel": true,
"self": 300.90285196200716
},
"steps_from_proto": {
"total": 51.757613532991854,
"count": 18201,
"is_parallel": true,
"self": 10.752541740945503,
"children": {
"_process_rank_one_or_two_observation": {
"total": 41.00507179204635,
"count": 182010,
"is_parallel": true,
"self": 41.00507179204635
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001813460000903433,
"count": 1,
"self": 0.0001813460000903433,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 683.1195377060263,
"count": 907544,
"is_parallel": true,
"self": 20.91310494599759,
"children": {
"process_trajectory": {
"total": 376.8651853310291,
"count": 907544,
"is_parallel": true,
"self": 375.2597579390293,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6054273919997968,
"count": 4,
"is_parallel": true,
"self": 1.6054273919997968
}
}
},
"_update_policy": {
"total": 285.3412474289996,
"count": 90,
"is_parallel": true,
"self": 84.08417874699728,
"children": {
"TorchPPOOptimizer.update": {
"total": 201.25706868200234,
"count": 4587,
"is_parallel": true,
"self": 201.25706868200234
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.16548347699995247,
"count": 1,
"self": 0.0022316950000913494,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16325178199986112,
"count": 1,
"self": 0.16325178199986112
}
}
}
}
}
}
}