ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4450214207172394,
"min": 0.4414829909801483,
"max": 1.4873738288879395,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13350.642578125,
"min": 13223.298828125,
"max": 45120.97265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989905.0,
"min": 29952.0,
"max": 989905.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989905.0,
"min": 29952.0,
"max": 989905.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.43758314847946167,
"min": -0.08327947556972504,
"max": 0.5274014472961426,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 117.70986938476562,
"min": -19.98707389831543,
"max": 145.86622619628906,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.027456559240818024,
"min": -0.005989507306367159,
"max": 0.3075923025608063,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.385814189910889,
"min": -1.641124963760376,
"max": 73.82215118408203,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06993176419296397,
"min": 0.06492985318942648,
"max": 0.07450622633387824,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9790446987014956,
"min": 0.48335857376196667,
"max": 1.0834755802049996,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013908271566471958,
"min": 0.00023015699846647925,
"max": 0.015912911667486398,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19471580193060742,
"min": 0.002761883981597751,
"max": 0.22278076334480956,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.668054586871429e-06,
"min": 7.668054586871429e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001073527642162,
"min": 0.0001073527642162,
"max": 0.003506912931029099,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255598571428572,
"min": 0.10255598571428572,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4357838,
"min": 1.3691136000000002,
"max": 2.5725757,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002653429728571429,
"min": 0.0002653429728571429,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037148016200000003,
"min": 0.0037148016200000003,
"max": 0.11692019291000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008492152206599712,
"min": 0.008492152206599712,
"max": 0.3906228542327881,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11889012902975082,
"min": 0.11889012902975082,
"max": 2.7343599796295166,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 406.0,
"min": 350.8837209302326,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28014.0,
"min": 15984.0,
"max": 33851.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4755382124772842,
"min": -1.0000000521540642,
"max": 1.6207585936819,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 100.33659844845533,
"min": -32.000001668930054,
"max": 141.0059976503253,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4755382124772842,
"min": -1.0000000521540642,
"max": 1.6207585936819,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 100.33659844845533,
"min": -32.000001668930054,
"max": 141.0059976503253,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0362502126599258,
"min": 0.03262208137785348,
"max": 7.670607476495206,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.4650144608749542,
"min": 2.4650144608749542,
"max": 122.7297196239233,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1706940113",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1706942637"
},
"total": 2524.3102444399997,
"count": 1,
"self": 0.5513084690001051,
"children": {
"run_training.setup": {
"total": 0.04908062399999835,
"count": 1,
"self": 0.04908062399999835
},
"TrainerController.start_learning": {
"total": 2523.7098553469996,
"count": 1,
"self": 1.9261648489432446,
"children": {
"TrainerController._reset_env": {
"total": 3.3156626990003133,
"count": 1,
"self": 3.3156626990003133
},
"TrainerController.advance": {
"total": 2518.366702663057,
"count": 63712,
"self": 2.045310923051602,
"children": {
"env_step": {
"total": 1842.2226173470312,
"count": 63712,
"self": 1679.0862757477557,
"children": {
"SubprocessEnvManager._take_step": {
"total": 161.9514966281472,
"count": 63712,
"self": 5.78313609812767,
"children": {
"TorchPolicy.evaluate": {
"total": 156.16836053001953,
"count": 62555,
"self": 156.16836053001953
}
}
},
"workers": {
"total": 1.1848449711283138,
"count": 63712,
"self": 0.0,
"children": {
"worker_root": {
"total": 2517.3256653120266,
"count": 63712,
"is_parallel": true,
"self": 983.6260426450272,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005608111999663379,
"count": 1,
"is_parallel": true,
"self": 0.004241097999511112,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013670140001522668,
"count": 8,
"is_parallel": true,
"self": 0.0013670140001522668
}
}
},
"UnityEnvironment.step": {
"total": 0.05333102400027201,
"count": 1,
"is_parallel": true,
"self": 0.0006964350009184272,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005291359998409462,
"count": 1,
"is_parallel": true,
"self": 0.0005291359998409462
},
"communicator.exchange": {
"total": 0.050261448999663116,
"count": 1,
"is_parallel": true,
"self": 0.050261448999663116
},
"steps_from_proto": {
"total": 0.0018440039998495195,
"count": 1,
"is_parallel": true,
"self": 0.0003815269997176074,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001462477000131912,
"count": 8,
"is_parallel": true,
"self": 0.001462477000131912
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1533.6996226669994,
"count": 63711,
"is_parallel": true,
"self": 41.879836477938625,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.774569862060616,
"count": 63711,
"is_parallel": true,
"self": 28.774569862060616
},
"communicator.exchange": {
"total": 1344.0662955570278,
"count": 63711,
"is_parallel": true,
"self": 1344.0662955570278
},
"steps_from_proto": {
"total": 118.97892076997232,
"count": 63711,
"is_parallel": true,
"self": 25.013950079028746,
"children": {
"_process_rank_one_or_two_observation": {
"total": 93.96497069094357,
"count": 509688,
"is_parallel": true,
"self": 93.96497069094357
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 674.0987743929741,
"count": 63712,
"self": 3.670602042016071,
"children": {
"process_trajectory": {
"total": 139.28155945095523,
"count": 63712,
"self": 139.06700894895494,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2145505020002929,
"count": 2,
"self": 0.2145505020002929
}
}
},
"_update_policy": {
"total": 531.1466129000028,
"count": 445,
"self": 310.83162593602174,
"children": {
"TorchPPOOptimizer.update": {
"total": 220.31498696398103,
"count": 22800,
"self": 220.31498696398103
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.7599968285067e-07,
"count": 1,
"self": 9.7599968285067e-07
},
"TrainerController._save_models": {
"total": 0.10132415999942168,
"count": 1,
"self": 0.001620691999050905,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09970346800037078,
"count": 1,
"self": 0.09970346800037078
}
}
}
}
}
}
}
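
Illustrative sketch (not part of the original run logs): one way to load the timers.json shown above and summarize it. It assumes only the layout visible in this file: a "gauges" map of {value, min, max, count} entries, plus a recursive timer tree whose nodes carry "total", "count", "self", and an optional "children" map. The file path used below is hypothetical.

import json

def print_timer_tree(node: dict, name: str = "root", depth: int = 0) -> None:
    """Recursively print each timer block's total seconds and call count."""
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.2f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        print_timer_tree(child, child_name, depth + 1)

if __name__ == "__main__":
    # Hypothetical path; point it at the file shown above.
    with open("run_logs/timers.json") as f:
        data = json.load(f)

    # Gauges: last/min/max of each tracked statistic over the run,
    # e.g. Pyramids.Environment.CumulativeReward.mean across 33 summaries.
    for gauge_name, stats in data["gauges"].items():
        print(f"{gauge_name}: last={stats['value']:.4g} "
              f"min={stats['min']:.4g} max={stats['max']:.4g} (n={stats['count']})")

    # Timer hierarchy: wall-clock time spent in each training phase
    # (env_step, trainer_advance, _update_policy, checkpointing, ...).
    print_timer_tree(data)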