{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7388007044792175,
"min": 0.7309030294418335,
"max": 1.352487564086914,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 22104.91796875,
"min": 21915.396484375,
"max": 41029.0625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989946.0,
"min": 29952.0,
"max": 989946.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989946.0,
"min": 29952.0,
"max": 989946.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3870429992675781,
"min": -0.102423295378685,
"max": 0.4368783235549927,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 101.40526580810547,
"min": -24.58159065246582,
"max": 115.46650695800781,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.6684350371360779,
"min": -0.03654031828045845,
"max": 0.6684350371360779,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 175.12997436523438,
"min": -9.610103607177734,
"max": 175.12997436523438,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06697335998233896,
"min": 0.06463177804142192,
"max": 0.07360926340248518,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9376270397527453,
"min": 0.509531025694825,
"max": 1.0491946983526634,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.05182982837071731,
"min": 0.00010431107823909638,
"max": 0.05182982837071731,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.7256175971900424,
"min": 0.001356044017108253,
"max": 0.7256175971900424,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.522661778192853e-06,
"min": 7.522661778192853e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010531726489469995,
"min": 0.00010531726489469995,
"max": 0.0035083277305574994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250752142857145,
"min": 0.10250752142857145,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351053000000003,
"min": 1.3886848,
"max": 2.5694425000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002605013907142856,
"min": 0.0002605013907142856,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003647019469999998,
"min": 0.003647019469999998,
"max": 0.11696730575000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010956819169223309,
"min": 0.010956819169223309,
"max": 0.5902058482170105,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15339547395706177,
"min": 0.15339547395706177,
"max": 4.131441116333008,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 457.7258064516129,
"min": 420.18840579710144,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28379.0,
"min": 15984.0,
"max": 32727.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4777386837428617,
"min": -1.0000000521540642,
"max": 1.4777386837428617,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 91.61979839205742,
"min": -31.994001641869545,
"max": 99.00439910590649,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4777386837428617,
"min": -1.0000000521540642,
"max": 1.4777386837428617,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 91.61979839205742,
"min": -31.994001641869545,
"max": 99.00439910590649,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05127620012424285,
"min": 0.04797805202964599,
"max": 12.12938067317009,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.179124407703057,
"min": 3.179124407703057,
"max": 194.07009077072144,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690519591",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690521720"
},
"total": 2129.059600501,
"count": 1,
"self": 0.49079215900019335,
"children": {
"run_training.setup": {
"total": 0.04009146300006705,
"count": 1,
"self": 0.04009146300006705
},
"TrainerController.start_learning": {
"total": 2128.528716879,
"count": 1,
"self": 1.2905530479906702,
"children": {
"TrainerController._reset_env": {
"total": 5.673141665000003,
"count": 1,
"self": 5.673141665000003
},
"TrainerController.advance": {
"total": 2121.4683262150093,
"count": 63499,
"self": 1.3397961900059272,
"children": {
"env_step": {
"total": 1465.1186691410085,
"count": 63499,
"self": 1359.910134592059,
"children": {
"SubprocessEnvManager._take_step": {
"total": 104.40756369499809,
"count": 63499,
"self": 4.544769550013939,
"children": {
"TorchPolicy.evaluate": {
"total": 99.86279414498415,
"count": 62570,
"self": 99.86279414498415
}
}
},
"workers": {
"total": 0.8009708539515259,
"count": 63499,
"self": 0.0,
"children": {
"worker_root": {
"total": 2123.8423002649506,
"count": 63499,
"is_parallel": true,
"self": 873.6422860289599,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005351331000042592,
"count": 1,
"is_parallel": true,
"self": 0.00398783400009961,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013634969999429813,
"count": 8,
"is_parallel": true,
"self": 0.0013634969999429813
}
}
},
"UnityEnvironment.step": {
"total": 0.04939262499999586,
"count": 1,
"is_parallel": true,
"self": 0.0006072620000168172,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004958630000828634,
"count": 1,
"is_parallel": true,
"self": 0.0004958630000828634
},
"communicator.exchange": {
"total": 0.046303006999892204,
"count": 1,
"is_parallel": true,
"self": 0.046303006999892204
},
"steps_from_proto": {
"total": 0.001986493000003975,
"count": 1,
"is_parallel": true,
"self": 0.00038619400027073425,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016002989997332406,
"count": 8,
"is_parallel": true,
"self": 0.0016002989997332406
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1250.2000142359907,
"count": 63498,
"is_parallel": true,
"self": 34.595276643995476,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.07112084896994,
"count": 63498,
"is_parallel": true,
"self": 23.07112084896994
},
"communicator.exchange": {
"total": 1091.0033152240176,
"count": 63498,
"is_parallel": true,
"self": 1091.0033152240176
},
"steps_from_proto": {
"total": 101.5303015190076,
"count": 63498,
"is_parallel": true,
"self": 20.45111614986888,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.07918536913871,
"count": 507984,
"is_parallel": true,
"self": 81.07918536913871
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 655.0098608839949,
"count": 63499,
"self": 2.360604709960967,
"children": {
"process_trajectory": {
"total": 107.74699612603786,
"count": 63499,
"self": 107.48648748603796,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26050863999989815,
"count": 2,
"self": 0.26050863999989815
}
}
},
"_update_policy": {
"total": 544.902260047996,
"count": 448,
"self": 356.93628246099,
"children": {
"TorchPPOOptimizer.update": {
"total": 187.96597758700602,
"count": 22863,
"self": 187.96597758700602
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.02400008472614e-06,
"count": 1,
"self": 1.02400008472614e-06
},
"TrainerController._save_models": {
"total": 0.09669492700004412,
"count": 1,
"self": 0.0013963810001769161,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0952985459998672,
"count": 1,
"self": 0.0952985459998672
}
}
}
}
}
}
}