{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8250987529754639,
"min": 0.8250987529754639,
"max": 2.83732533454895,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8032.33642578125,
"min": 8032.33642578125,
"max": 29119.46875,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.080318450927734,
"min": 0.44762668013572693,
"max": 14.106208801269531,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2886.46533203125,
"min": 86.8395767211914,
"max": 2886.46533203125,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06977067743449453,
"min": 0.06353342460189357,
"max": 0.0736068118700453,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3488533871724726,
"min": 0.25413369840757427,
"max": 0.35727567289061496,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1579944309679901,
"min": 0.1443296899252078,
"max": 0.28146748658927045,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7899721548399505,
"min": 0.5773187597008312,
"max": 1.3691867515444756,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.0528989824000023e-06,
"min": 3.0528989824000023e-06,
"max": 0.00029675280108239997,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.526449491200001e-05,
"min": 1.526449491200001e-05,
"max": 0.001454064015312,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891760000000003,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.4119904000000001,
"max": 0.9846880000000001,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.077824000000005e-05,
"min": 6.077824000000005e-05,
"max": 0.00494598824,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00030389120000000027,
"min": 0.00030389120000000027,
"max": 0.024235931200000005,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.763636363636362,
"min": 4.590909090909091,
"max": 28.022727272727273,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1527.0,
"min": 202.0,
"max": 1531.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.763636363636362,
"min": 4.590909090909091,
"max": 28.022727272727273,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1527.0,
"min": 202.0,
"max": 1531.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680117656",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/SnowballTarget.yaml --env=/content/ml-agents/training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680119290"
},
"total": 1634.4200644360003,
"count": 1,
"self": 0.37332009300007485,
"children": {
"run_training.setup": {
"total": 0.12031972200020391,
"count": 1,
"self": 0.12031972200020391
},
"TrainerController.start_learning": {
"total": 1633.926424621,
"count": 1,
"self": 1.207905044056588,
"children": {
"TrainerController._reset_env": {
"total": 6.893835688000308,
"count": 1,
"self": 6.893835688000308
},
"TrainerController.advance": {
"total": 1625.698550199943,
"count": 45499,
"self": 0.6737013268839291,
"children": {
"env_step": {
"total": 1625.024848873059,
"count": 45499,
"self": 1318.789030222952,
"children": {
"SubprocessEnvManager._take_step": {
"total": 305.5900266670933,
"count": 45499,
"self": 5.077906725049161,
"children": {
"TorchPolicy.evaluate": {
"total": 300.51211994204414,
"count": 45499,
"self": 300.51211994204414
}
}
},
"workers": {
"total": 0.6457919830136234,
"count": 45499,
"self": 0.0,
"children": {
"worker_root": {
"total": 1630.9105573719426,
"count": 45499,
"is_parallel": true,
"self": 1063.7236828319856,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018819829997482884,
"count": 1,
"is_parallel": true,
"self": 0.0005624149989671423,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013195680007811461,
"count": 10,
"is_parallel": true,
"self": 0.0013195680007811461
}
}
},
"UnityEnvironment.step": {
"total": 0.12093200999970577,
"count": 1,
"is_parallel": true,
"self": 0.003153456000291044,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004014239998468838,
"count": 1,
"is_parallel": true,
"self": 0.0004014239998468838
},
"communicator.exchange": {
"total": 0.11538751199987018,
"count": 1,
"is_parallel": true,
"self": 0.11538751199987018
},
"steps_from_proto": {
"total": 0.001989617999697657,
"count": 1,
"is_parallel": true,
"self": 0.00042029800033560605,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001569319999362051,
"count": 10,
"is_parallel": true,
"self": 0.001569319999362051
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 567.186874539957,
"count": 45498,
"is_parallel": true,
"self": 22.638055046035788,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.498984581891364,
"count": 45498,
"is_parallel": true,
"self": 12.498984581891364
},
"communicator.exchange": {
"total": 459.3451663080218,
"count": 45498,
"is_parallel": true,
"self": 459.3451663080218
},
"steps_from_proto": {
"total": 72.70466860400802,
"count": 45498,
"is_parallel": true,
"self": 13.906194310128285,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.798474293879735,
"count": 454980,
"is_parallel": true,
"self": 58.798474293879735
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011218400004509022,
"count": 1,
"self": 0.00011218400004509022,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1618.5059237209935,
"count": 876449,
"is_parallel": true,
"self": 21.63062269703005,
"children": {
"process_trajectory": {
"total": 581.0125831989626,
"count": 876449,
"is_parallel": true,
"self": 577.8292616359631,
"children": {
"RLTrainer._checkpoint": {
"total": 3.183321562999481,
"count": 10,
"is_parallel": true,
"self": 3.183321562999481
}
}
},
"_update_policy": {
"total": 1015.8627178250008,
"count": 227,
"is_parallel": true,
"self": 459.4786093919656,
"children": {
"TorchPPOOptimizer.update": {
"total": 556.3841084330352,
"count": 38580,
"is_parallel": true,
"self": 556.3841084330352
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1260215050001534,
"count": 1,
"self": 0.0008633620000182418,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12515814300013517,
"count": 1,
"self": 0.12515814300013517
}
}
}
}
}
}
}