ppo-PyramidsRND/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.32803598046302795,
"min": 0.31065699458122253,
"max": 1.4080204963684082,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9757.1025390625,
"min": 9245.15234375,
"max": 42713.7109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989897.0,
"min": 29952.0,
"max": 989897.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989897.0,
"min": 29952.0,
"max": 989897.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5701628923416138,
"min": -0.09470613300800323,
"max": 0.5701628923416138,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 161.92625427246094,
"min": -22.91888427734375,
"max": 161.92625427246094,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.006555818486958742,
"min": -0.018487349152565002,
"max": 0.3011987507343292,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.8618524074554443,
"min": -5.028558731079102,
"max": 72.58889770507812,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06547037909331266,
"min": 0.0652124868184201,
"max": 0.07489496147792334,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9820556863996899,
"min": 0.5162939932081921,
"max": 1.0715519837685858,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017850920632352225,
"min": 0.0011695900533622346,
"max": 0.017850920632352225,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2677638094852834,
"min": 0.014942314233970218,
"max": 0.2677638094852834,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.471157509646661e-06,
"min": 7.471157509646661e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011206736264469991,
"min": 0.00011206736264469991,
"max": 0.0033828191723937,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249035333333333,
"min": 0.10249035333333333,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373553,
"min": 1.3886848,
"max": 2.5276063,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025878629799999984,
"min": 0.00025878629799999984,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038817944699999973,
"min": 0.0038817944699999973,
"max": 0.11278786937000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009251771494746208,
"min": 0.00880469474941492,
"max": 0.40506792068481445,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13877657055854797,
"min": 0.12326572835445404,
"max": 2.835475444793701,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 305.3578947368421,
"min": 305.3578947368421,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29009.0,
"min": 15984.0,
"max": 33030.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.612387484094749,
"min": -1.0000000521540642,
"max": 1.612387484094749,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 154.7891984730959,
"min": -30.689401648938656,
"max": 154.7891984730959,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.612387484094749,
"min": -1.0000000521540642,
"max": 1.612387484094749,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 154.7891984730959,
"min": -30.689401648938656,
"max": 154.7891984730959,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02873739927645147,
"min": 0.02873739927645147,
"max": 7.933095519430935,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.758790330539341,
"min": 2.705439758632565,
"max": 126.92952831089497,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1711724312",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1711727663"
},
"total": 3351.493752533,
"count": 1,
"self": 0.7004167509999206,
"children": {
"run_training.setup": {
"total": 0.06798957599994537,
"count": 1,
"self": 0.06798957599994537
},
"TrainerController.start_learning": {
"total": 3350.7253462060003,
"count": 1,
"self": 2.2410491389882736,
"children": {
"TrainerController._reset_env": {
"total": 3.31049828700003,
"count": 1,
"self": 3.31049828700003
},
"TrainerController.advance": {
"total": 3345.091753343012,
"count": 63853,
"self": 2.6532423560602183,
"children": {
"env_step": {
"total": 2247.283545811972,
"count": 63853,
"self": 2078.10220225791,
"children": {
"SubprocessEnvManager._take_step": {
"total": 167.78450095800667,
"count": 63853,
"self": 7.137129368023295,
"children": {
"TorchPolicy.evaluate": {
"total": 160.64737158998338,
"count": 62557,
"self": 160.64737158998338
}
}
},
"workers": {
"total": 1.3968425960556488,
"count": 63853,
"self": 0.0,
"children": {
"worker_root": {
"total": 3344.171026750019,
"count": 63853,
"is_parallel": true,
"self": 1451.1277242449655,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007077594999998382,
"count": 1,
"is_parallel": true,
"self": 0.004827194999961648,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002250400000036734,
"count": 8,
"is_parallel": true,
"self": 0.002250400000036734
}
}
},
"UnityEnvironment.step": {
"total": 0.06975659900001574,
"count": 1,
"is_parallel": true,
"self": 0.0008005889999935789,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005394070000193096,
"count": 1,
"is_parallel": true,
"self": 0.0005394070000193096
},
"communicator.exchange": {
"total": 0.06636601499997141,
"count": 1,
"is_parallel": true,
"self": 0.06636601499997141
},
"steps_from_proto": {
"total": 0.002050588000031439,
"count": 1,
"is_parallel": true,
"self": 0.0005316340000263153,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015189540000051238,
"count": 8,
"is_parallel": true,
"self": 0.0015189540000051238
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1893.0433025050534,
"count": 63852,
"is_parallel": true,
"self": 52.07784769098748,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.1077516219803,
"count": 63852,
"is_parallel": true,
"self": 28.1077516219803
},
"communicator.exchange": {
"total": 1680.0425125340398,
"count": 63852,
"is_parallel": true,
"self": 1680.0425125340398
},
"steps_from_proto": {
"total": 132.81519065804576,
"count": 63852,
"is_parallel": true,
"self": 28.632330043085403,
"children": {
"_process_rank_one_or_two_observation": {
"total": 104.18286061496036,
"count": 510816,
"is_parallel": true,
"self": 104.18286061496036
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1095.1549651749795,
"count": 63853,
"self": 4.465817367983618,
"children": {
"process_trajectory": {
"total": 165.97743800599886,
"count": 63853,
"self": 165.6474739329987,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3299640730001556,
"count": 2,
"self": 0.3299640730001556
}
}
},
"_update_policy": {
"total": 924.7117098009969,
"count": 450,
"self": 363.3477507919903,
"children": {
"TorchPPOOptimizer.update": {
"total": 561.3639590090066,
"count": 22812,
"self": 561.3639590090066
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.041999894368928e-06,
"count": 1,
"self": 1.041999894368928e-06
},
"TrainerController._save_models": {
"total": 0.08204439500013905,
"count": 1,
"self": 0.002024855999934516,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08001953900020453,
"count": 1,
"self": 0.08001953900020453
}
}
}
}
}
}
}