{
    "name": "root",
    "gauges": {
        "SnowballTarget.Policy.Entropy.mean": {
            "value": 1.026114821434021,
            "min": 1.026114821434021,
            "max": 2.8681693077087402,
            "count": 20
        },
        "SnowballTarget.Policy.Entropy.sum": {
            "value": 9808.6318359375,
            "min": 9808.6318359375,
            "max": 29372.921875,
            "count": 20
        },
        "SnowballTarget.Step.mean": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Step.sum": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
            "value": 12.59863567352295,
            "min": 0.4274159073829651,
            "max": 12.609695434570312,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
            "value": 2456.73388671875,
            "min": 82.91868591308594,
            "max": 2556.86962890625,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.mean": {
            "value": 199.0,
            "min": 199.0,
            "max": 199.0,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.sum": {
            "value": 8756.0,
            "min": 8756.0,
            "max": 10945.0,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.mean": {
            "value": 0.06989119633790859,
            "min": 0.06238066950851741,
            "max": 0.0728508560949658,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.sum": {
            "value": 0.27956478535163437,
            "min": 0.25359664498640033,
            "max": 0.364254280474829,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.mean": {
            "value": 0.21823058901902506,
            "min": 0.12625516106502388,
            "max": 0.2745835717548342,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.sum": {
            "value": 0.8729223560761002,
            "min": 0.5050206442600955,
            "max": 1.2931683390748265,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.mean": {
            "value": 8.082097306000005e-06,
            "min": 8.082097306000005e-06,
            "max": 0.000291882002706,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.sum": {
            "value": 3.232838922400002e-05,
            "min": 3.232838922400002e-05,
            "max": 0.00138516003828,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.mean": {
            "value": 0.10269400000000001,
            "min": 0.10269400000000001,
            "max": 0.19729400000000002,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.sum": {
            "value": 0.41077600000000003,
            "min": 0.41077600000000003,
            "max": 0.96172,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.mean": {
            "value": 0.0001444306000000001,
            "min": 0.0001444306000000001,
            "max": 0.0048649706,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.sum": {
            "value": 0.0005777224000000004,
            "min": 0.0005777224000000004,
            "max": 0.023089828,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.mean": {
            "value": 24.636363636363637,
            "min": 3.477272727272727,
            "max": 25.12727272727273,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.sum": {
            "value": 1084.0,
            "min": 153.0,
            "max": 1382.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.mean": {
            "value": 24.636363636363637,
            "min": 3.477272727272727,
            "max": 25.12727272727273,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.sum": {
            "value": 1084.0,
            "min": 153.0,
            "max": 1382.0,
            "count": 20
        },
        "SnowballTarget.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        },
        "SnowballTarget.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1714415389",
        "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
        "mlagents_version": "1.1.0.dev0",
        "mlagents_envs_version": "1.1.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "2.2.1+cu121",
        "numpy_version": "1.23.5",
        "end_time_seconds": "1714415844"
    },
    "total": 454.96688982,
    "count": 1,
    "self": 0.4400821689998793,
    "children": {
        "run_training.setup": {
            "total": 0.07762357699994027,
            "count": 1,
            "self": 0.07762357699994027
        },
        "TrainerController.start_learning": {
            "total": 454.4491840740002,
            "count": 1,
            "self": 0.5649350209948807,
            "children": {
                "TrainerController._reset_env": {
                    "total": 3.250793213999941,
                    "count": 1,
                    "self": 3.250793213999941
                },
                "TrainerController.advance": {
                    "total": 450.54300560600564,
                    "count": 18199,
                    "self": 0.2872963790139238,
                    "children": {
                        "env_step": {
                            "total": 450.2557092269917,
                            "count": 18199,
                            "self": 293.8344283359738,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 156.12787657601143,
                                    "count": 18199,
                                    "self": 1.4580152889930105,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 154.66986128701842,
                                            "count": 18199,
                                            "self": 154.66986128701842
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.29340431500645536,
                                    "count": 18199,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 453.25480569201954,
                                            "count": 18199,
                                            "is_parallel": true,
                                            "self": 225.85769595102647,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.009769147999804773,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.005242399999360714,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.0045267480004440586,
                                                                    "count": 10,
                                                                    "is_parallel": true,
                                                                    "self": 0.0045267480004440586
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.041969869000013205,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0006880649998493027,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0004272010000931914,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0004272010000931914
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.03884167399996841,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.03884167399996841
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0020129290001023037,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0004067720001330599,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0016061569999692438,
                                                                            "count": 10,
                                                                            "is_parallel": true,
                                                                            "self": 0.0016061569999692438
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 227.39710974099307,
                                                    "count": 18198,
                                                    "is_parallel": true,
                                                    "self": 10.367211914003747,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 5.284454141006108,
                                                            "count": 18198,
                                                            "is_parallel": true,
                                                            "self": 5.284454141006108
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 178.37840468900686,
                                                            "count": 18198,
                                                            "is_parallel": true,
                                                            "self": 178.37840468900686
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 33.36703899697636,
                                                            "count": 18198,
                                                            "is_parallel": true,
                                                            "self": 6.313206856961415,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 27.053832140014947,
                                                                    "count": 181980,
                                                                    "is_parallel": true,
                                                                    "self": 27.053832140014947
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 0.00018504499985283474,
                    "count": 1,
                    "self": 0.00018504499985283474,
                    "children": {
                        "thread_root": {
                            "total": 0.0,
                            "count": 0,
                            "is_parallel": true,
                            "self": 0.0,
                            "children": {
                                "trainer_advance": {
                                    "total": 445.4800992340015,
                                    "count": 638214,
                                    "is_parallel": true,
                                    "self": 13.543267435043845,
                                    "children": {
                                        "process_trajectory": {
                                            "total": 246.48700625495758,
                                            "count": 638214,
                                            "is_parallel": true,
                                            "self": 245.89898512895752,
                                            "children": {
                                                "RLTrainer._checkpoint": {
                                                    "total": 0.5880211260000578,
                                                    "count": 4,
                                                    "is_parallel": true,
                                                    "self": 0.5880211260000578
                                                }
                                            }
                                        },
                                        "_update_policy": {
                                            "total": 185.44982554400008,
                                            "count": 90,
                                            "is_parallel": true,
                                            "self": 53.6474246179846,
                                            "children": {
                                                "TorchPPOOptimizer.update": {
                                                    "total": 131.80240092601548,
                                                    "count": 4587,
                                                    "is_parallel": true,
                                                    "self": 131.80240092601548
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "TrainerController._save_models": {
                    "total": 0.0902651879998757,
                    "count": 1,
                    "self": 0.0009296909997829061,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.0893354970000928,
                            "count": 1,
                            "self": 0.0893354970000928
                        }
                    }
                }
            }
        }
    }
}