{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.524087905883789,
"min": 1.524087905883789,
"max": 2.890340805053711,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 14568.755859375,
"min": 14568.755859375,
"max": 29631.7734375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 8.589315414428711,
"min": 0.027471229434013367,
"max": 8.589315414428711,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1674.91650390625,
"min": 5.329418659210205,
"max": 1681.61962890625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 21.704545454545453,
"min": 2.6363636363636362,
"max": 22.363636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 955.0,
"min": 116.0,
"max": 1230.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 21.704545454545453,
"min": 2.6363636363636362,
"max": 22.363636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 955.0,
"min": 116.0,
"max": 1230.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.03813731559494045,
"min": 0.030352596685144163,
"max": 0.03813731559494045,
"count": 19
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.03813731559494045,
"min": 0.030352596685144163,
"max": 0.03813731559494045,
"count": 19
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.261536289538656,
"min": 0.07737695226879944,
"max": 0.33659222083432333,
"count": 19
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.261536289538656,
"min": 0.07737695226879944,
"max": 0.33659222083432333,
"count": 19
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.488097503999998e-06,
"min": 7.488097503999998e-06,
"max": 0.00028468800510399997,
"count": 19
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 7.488097503999998e-06,
"min": 7.488097503999998e-06,
"max": 0.00028468800510399997,
"count": 19
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10499199999999999,
"min": 0.10499199999999999,
"max": 0.28979199999999994,
"count": 19
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10499199999999999,
"min": 0.10499199999999999,
"max": 0.28979199999999994,
"count": 19
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00013455039999999988,
"min": 0.00013455039999999988,
"max": 0.0047453104,
"count": 19
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00013455039999999988,
"min": 0.00013455039999999988,
"max": 0.0047453104,
"count": 19
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692546882",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/snowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692547414"
},
"total": 531.493570528,
"count": 1,
"self": 0.438592632000109,
"children": {
"run_training.setup": {
"total": 0.050082269000085944,
"count": 1,
"self": 0.050082269000085944
},
"TrainerController.start_learning": {
"total": 531.0048956269998,
"count": 1,
"self": 0.5461930540013782,
"children": {
"TrainerController._reset_env": {
"total": 4.4186103289998755,
"count": 1,
"self": 4.4186103289998755
},
"TrainerController.advance": {
"total": 525.8932921079986,
"count": 18202,
"self": 0.2556655550188225,
"children": {
"env_step": {
"total": 525.6376265529798,
"count": 18202,
"self": 405.4591730799416,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.91368708700747,
"count": 18202,
"self": 1.9687249190587863,
"children": {
"TorchPolicy.evaluate": {
"total": 117.94496216794869,
"count": 18202,
"self": 117.94496216794869
}
}
},
"workers": {
"total": 0.2647663860307148,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 529.6026522519887,
"count": 18202,
"is_parallel": true,
"self": 290.3153083829852,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018283710001014697,
"count": 1,
"is_parallel": true,
"self": 0.0004956860011589015,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013326849989425682,
"count": 10,
"is_parallel": true,
"self": 0.0013326849989425682
}
}
},
"UnityEnvironment.step": {
"total": 0.06508506799991665,
"count": 1,
"is_parallel": true,
"self": 0.0006614669996451994,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004135310000492609,
"count": 1,
"is_parallel": true,
"self": 0.0004135310000492609
},
"communicator.exchange": {
"total": 0.061839138000323146,
"count": 1,
"is_parallel": true,
"self": 0.061839138000323146
},
"steps_from_proto": {
"total": 0.0021709319998990395,
"count": 1,
"is_parallel": true,
"self": 0.0005372180003178073,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016337139995812322,
"count": 10,
"is_parallel": true,
"self": 0.0016337139995812322
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 239.2873438690035,
"count": 18201,
"is_parallel": true,
"self": 10.342141128946878,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.226137717036181,
"count": 18201,
"is_parallel": true,
"self": 5.226137717036181
},
"communicator.exchange": {
"total": 188.4299399129991,
"count": 18201,
"is_parallel": true,
"self": 188.4299399129991
},
"steps_from_proto": {
"total": 35.28912511002136,
"count": 18201,
"is_parallel": true,
"self": 6.320004223180604,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.969120886840756,
"count": 182010,
"is_parallel": true,
"self": 28.969120886840756
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00017537399980938062,
"count": 1,
"self": 0.00017537399980938062,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 520.6744152131068,
"count": 594430,
"is_parallel": true,
"self": 13.519625416097824,
"children": {
"process_trajectory": {
"total": 327.58573930000966,
"count": 594430,
"is_parallel": true,
"self": 326.30826315300965,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2774761470000158,
"count": 4,
"is_parallel": true,
"self": 1.2774761470000158
}
}
},
"_update_policy": {
"total": 179.56905049699935,
"count": 19,
"is_parallel": true,
"self": 129.0545505799896,
"children": {
"TorchPPOOptimizer.update": {
"total": 50.51449991700974,
"count": 2611,
"is_parallel": true,
"self": 50.51449991700974
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14662476200010133,
"count": 1,
"self": 0.0010891460001403175,
"children": {
"RLTrainer._checkpoint": {
"total": 0.145535615999961,
"count": 1,
"self": 0.145535615999961
}
}
}
}
}
}
}
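
The JSON above is the gauges/timers log that ML-Agents writes at the end of a training run (here, PPO on the SnowballTarget environment, per the metadata's command line). As a minimal sketch for working with it, assuming the log is saved locally under a hypothetical path such as timers.json, the following Python snippet loads the file and prints the recorded range of each gauge:

# Minimal sketch (not part of the original log): load an ML-Agents
# gauges/timers JSON like the one above and summarise each gauge.
# The path "timers.json" is an assumption for illustration only.
import json

with open("timers.json") as f:
    log = json.load(f)

# Each gauge entry stores the latest value plus the min/max/count
# observed over the run, exactly as in the "gauges" object above.
for name, stats in log["gauges"].items():
    print(f"{name}: value={stats['value']:.4g} "
          f"(min={stats['min']:.4g}, max={stats['max']:.4g}, n={stats['count']})")

Run against this log, such a script would show, for example, that SnowballTarget.Environment.CumulativeReward.mean rose from a minimum of about 2.64 to roughly 21.7 by the final of its 20 recorded summaries.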