{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.938618540763855,
"min": 0.938618540763855,
"max": 2.87599515914917,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8972.2548828125,
"min": 8972.2548828125,
"max": 29484.701171875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.54054069519043,
"min": 0.18026265501976013,
"max": 12.54054069519043,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2445.405517578125,
"min": 34.97095489501953,
"max": 2505.1455078125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06648631611503386,
"min": 0.06215271084935617,
"max": 0.07396830529706125,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26594526446013544,
"min": 0.24861084339742467,
"max": 0.3698415264853062,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17673435690356235,
"min": 0.09940047554018924,
"max": 0.27343076771380853,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7069374276142494,
"min": 0.39760190216075697,
"max": 1.3671538385690427,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.227272727272727,
"min": 2.75,
"max": 25.227272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1110.0,
"min": 121.0,
"max": 1366.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.227272727272727,
"min": 2.75,
"max": 25.227272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1110.0,
"min": 121.0,
"max": 1366.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689292044",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689292543"
},
"total": 499.474360219,
"count": 1,
"self": 0.43075143999999455,
"children": {
"run_training.setup": {
"total": 0.05000792399999909,
"count": 1,
"self": 0.05000792399999909
},
"TrainerController.start_learning": {
"total": 498.993600855,
"count": 1,
"self": 0.5880310720008879,
"children": {
"TrainerController._reset_env": {
"total": 4.089379120000046,
"count": 1,
"self": 4.089379120000046
},
"TrainerController.advance": {
"total": 494.1759352909991,
"count": 18201,
"self": 0.30424915599905944,
"children": {
"env_step": {
"total": 493.87168613500006,
"count": 18201,
"self": 357.77386607400064,
"children": {
"SubprocessEnvManager._take_step": {
"total": 135.80767412800066,
"count": 18201,
"self": 1.9485818090045086,
"children": {
"TorchPolicy.evaluate": {
"total": 133.85909231899615,
"count": 18201,
"self": 133.85909231899615
}
}
},
"workers": {
"total": 0.2901459329987688,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 497.3372399060041,
"count": 18201,
"is_parallel": true,
"self": 235.14856411101022,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005649292000043715,
"count": 1,
"is_parallel": true,
"self": 0.004162787999973716,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014865040000699992,
"count": 10,
"is_parallel": true,
"self": 0.0014865040000699992
}
}
},
"UnityEnvironment.step": {
"total": 0.037675933000002715,
"count": 1,
"is_parallel": true,
"self": 0.0005611850000377672,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041024000000788874,
"count": 1,
"is_parallel": true,
"self": 0.00041024000000788874
},
"communicator.exchange": {
"total": 0.033788329000003614,
"count": 1,
"is_parallel": true,
"self": 0.033788329000003614
},
"steps_from_proto": {
"total": 0.002916178999953445,
"count": 1,
"is_parallel": true,
"self": 0.0005920839998907468,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002324095000062698,
"count": 10,
"is_parallel": true,
"self": 0.002324095000062698
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 262.18867579499386,
"count": 18200,
"is_parallel": true,
"self": 10.927727862001404,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.817867067995621,
"count": 18200,
"is_parallel": true,
"self": 5.817867067995621
},
"communicator.exchange": {
"total": 205.85649500000227,
"count": 18200,
"is_parallel": true,
"self": 205.85649500000227
},
"steps_from_proto": {
"total": 39.58658586499456,
"count": 18200,
"is_parallel": true,
"self": 7.211965547014302,
"children": {
"_process_rank_one_or_two_observation": {
"total": 32.37462031798026,
"count": 182000,
"is_parallel": true,
"self": 32.37462031798026
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015286299992567365,
"count": 1,
"self": 0.00015286299992567365,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 490.16979860498725,
"count": 468330,
"is_parallel": true,
"self": 10.74216311396816,
"children": {
"process_trajectory": {
"total": 270.2515906570193,
"count": 468330,
"is_parallel": true,
"self": 269.3796941320192,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8718965250000679,
"count": 4,
"is_parallel": true,
"self": 0.8718965250000679
}
}
},
"_update_policy": {
"total": 209.1760448339998,
"count": 90,
"is_parallel": true,
"self": 80.83944234499802,
"children": {
"TorchPPOOptimizer.update": {
"total": 128.3366024890018,
"count": 4587,
"is_parallel": true,
"self": 128.3366024890018
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1401025090000303,
"count": 1,
"self": 0.000930520000110846,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13917198899991945,
"count": 1,
"self": 0.13917198899991945
}
}
}
}
}
}
}