{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.082984447479248,
"min": 1.0797501802444458,
"max": 2.860104560852051,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10340.3359375,
"min": 10340.3359375,
"max": 29321.791015625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.40615463256836,
"min": 0.40753763914108276,
"max": 13.40615463256836,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2614.2001953125,
"min": 79.06230163574219,
"max": 2728.0126953125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06750854941936482,
"min": 0.062376987117664505,
"max": 0.07549359000368254,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2700341976774593,
"min": 0.25303481892671487,
"max": 0.37100250530613055,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22519864571992965,
"min": 0.12455417616172995,
"max": 0.27167565364609747,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9007945828797186,
"min": 0.4982167046469198,
"max": 1.3149735193480463,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000007e-06,
"min": 8.082097306000007e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400003e-05,
"min": 3.232838922400003e-05,
"max": 0.0013851600382800001,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.197294,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.9617200000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.004864970599999999,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828000000003,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.954545454545453,
"min": 3.977272727272727,
"max": 26.763636363636362,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1142.0,
"min": 175.0,
"max": 1472.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.954545454545453,
"min": 3.977272727272727,
"max": 26.763636363636362,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1142.0,
"min": 175.0,
"max": 1472.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681131052",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681131549"
},
"total": 496.87048868600004,
"count": 1,
"self": 0.3892004780000775,
"children": {
"run_training.setup": {
"total": 0.12186843099999578,
"count": 1,
"self": 0.12186843099999578
},
"TrainerController.start_learning": {
"total": 496.35941977699997,
"count": 1,
"self": 0.5199413099957724,
"children": {
"TrainerController._reset_env": {
"total": 3.911306823000018,
"count": 1,
"self": 3.911306823000018
},
"TrainerController.advance": {
"total": 491.7789665010043,
"count": 18203,
"self": 0.27169876201816123,
"children": {
"env_step": {
"total": 491.5072677389861,
"count": 18203,
"self": 367.6366166099875,
"children": {
"SubprocessEnvManager._take_step": {
"total": 123.61097629599914,
"count": 18203,
"self": 1.8067814910008053,
"children": {
"TorchPolicy.evaluate": {
"total": 121.80419480499833,
"count": 18203,
"self": 121.80419480499833
}
}
},
"workers": {
"total": 0.25967483299945116,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 494.9292554899973,
"count": 18203,
"is_parallel": true,
"self": 249.28526899400322,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004937186999995902,
"count": 1,
"is_parallel": true,
"self": 0.0034824040000387413,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014547829999571604,
"count": 10,
"is_parallel": true,
"self": 0.0014547829999571604
}
}
},
"UnityEnvironment.step": {
"total": 0.037087414000012586,
"count": 1,
"is_parallel": true,
"self": 0.0005788890000246738,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003198880000070403,
"count": 1,
"is_parallel": true,
"self": 0.0003198880000070403
},
"communicator.exchange": {
"total": 0.03413464799999133,
"count": 1,
"is_parallel": true,
"self": 0.03413464799999133
},
"steps_from_proto": {
"total": 0.002053988999989542,
"count": 1,
"is_parallel": true,
"self": 0.0004294250000498323,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00162456399993971,
"count": 10,
"is_parallel": true,
"self": 0.00162456399993971
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 245.64398649599406,
"count": 18202,
"is_parallel": true,
"self": 10.053901829981811,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.399802635009451,
"count": 18202,
"is_parallel": true,
"self": 5.399802635009451
},
"communicator.exchange": {
"total": 198.5015058300046,
"count": 18202,
"is_parallel": true,
"self": 198.5015058300046
},
"steps_from_proto": {
"total": 31.688776200998205,
"count": 18202,
"is_parallel": true,
"self": 6.237527612013508,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.451248588984697,
"count": 182020,
"is_parallel": true,
"self": 25.451248588984697
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011956899993492698,
"count": 1,
"self": 0.00011956899993492698,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 488.2792785530055,
"count": 410197,
"is_parallel": true,
"self": 10.185254223007291,
"children": {
"process_trajectory": {
"total": 250.40798788099812,
"count": 410197,
"is_parallel": true,
"self": 248.97010533599826,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4378825449998658,
"count": 4,
"is_parallel": true,
"self": 1.4378825449998658
}
}
},
"_update_policy": {
"total": 227.68603644900008,
"count": 90,
"is_parallel": true,
"self": 86.29239989199922,
"children": {
"TorchPPOOptimizer.update": {
"total": 141.39363655700086,
"count": 6116,
"is_parallel": true,
"self": 141.39363655700086
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1490855739999688,
"count": 1,
"self": 0.0012474510000402006,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1478381229999286,
"count": 1,
"self": 0.1478381229999286
}
}
}
}
}
}
}
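
Since the file above is the raw run_logs/timers.json written by mlagents-learn, here is a minimal Python sketch of how the "gauges" block can be loaded and inspected after a run. The file path is an assumption (adjust it to wherever the results directory lives); only the standard-library json module is used, and each gauge is read as the value/min/max/count record shown above.

import json

# Load the ML-Agents timer/gauge dump. The path is an assumption;
# point it at this file's location inside your results directory.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the most recent summary value plus the min/max
# observed across `count` summary writes during training.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:g} "
          f"(min={gauge['min']:g}, max={gauge['max']:g}, count={gauge['count']})")

For this run, the loop would show, for example, that SnowballTarget.Environment.CumulativeReward.mean climbed from a minimum of about 3.98 to roughly 25.95 over the 20 recorded summaries, which is the usual quick check that training improved the policy.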