{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8349014520645142,
"min": 0.6994969844818115,
"max": 1.3549772500991821,
"count": 7
},
"Pyramids.Policy.Entropy.sum": {
"value": 24966.892578125,
"min": 20951.333984375,
"max": 41104.58984375,
"count": 7
},
"Pyramids.Step.mean": {
"value": 209899.0,
"min": 29914.0,
"max": 209899.0,
"count": 7
},
"Pyramids.Step.sum": {
"value": 209899.0,
"min": 29914.0,
"max": 209899.0,
"count": 7
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.06242280825972557,
"min": -0.10216616839170456,
"max": 0.0034452727995812893,
"count": 7
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -14.981473922729492,
"min": -24.622047424316406,
"max": 0.8165296316146851,
"count": 7
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.08499173820018768,
"min": 0.08499173820018768,
"max": 0.4416685998439789,
"count": 7
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 20.39801788330078,
"min": 20.39801788330078,
"max": 104.67546081542969,
"count": 7
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06809990362590147,
"min": 0.06515720497297345,
"max": 0.07226017955044188,
"count": 7
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9533986507626205,
"min": 0.5212576397837876,
"max": 0.9533986507626205,
"count": 7
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0012558790535748398,
"min": 0.00021666575314515318,
"max": 0.008709266197464777,
"count": 7
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.017582306750047755,
"min": 0.0028166547908869913,
"max": 0.06967412957971822,
"count": 7
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00028053029220419045,
"min": 0.00028053029220419045,
"max": 0.0002984042755319083,
"count": 7
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.003927424090858667,
"min": 0.0023872342042552666,
"max": 0.004011096262967933,
"count": 7
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.19351009523809523,
"min": 0.19351009523809523,
"max": 0.19946809166666668,
"count": 7
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.709141333333333,
"min": 1.5957447333333334,
"max": 2.737032066666667,
"count": 7
},
"Pyramids.Policy.Beta.mean": {
"value": 0.009351658514285715,
"min": 0.009351658514285715,
"max": 0.009946862357499999,
"count": 7
},
"Pyramids.Policy.Beta.sum": {
"value": 0.13092321920000002,
"min": 0.07957489885999999,
"max": 0.13370950346,
"count": 7
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.07266704738140106,
"min": 0.07266704738140106,
"max": 0.6021338701248169,
"count": 7
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.0173386335372925,
"min": 1.0173386335372925,
"max": 4.817070960998535,
"count": 7
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 988.9677419354839,
"min": 955.875,
"max": 999.0,
"count": 7
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30658.0,
"min": 15946.0,
"max": 33309.0,
"count": 7
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9251742444692119,
"min": -0.9997875518165529,
"max": -0.6440187974367291,
"count": 7
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -28.68040157854557,
"min": -31.993201658129692,
"max": -13.961000882089138,
"count": 7
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9251742444692119,
"min": -0.9997875518165529,
"max": -0.6440187974367291,
"count": 7
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -28.68040157854557,
"min": -31.993201658129692,
"max": -13.961000882089138,
"count": 7
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.7498406122528738,
"min": 0.7498406122528738,
"max": 12.433864971622825,
"count": 7
},
"Pyramids.Policy.RndReward.sum": {
"value": 23.245058979839087,
"min": 23.245058979839087,
"max": 198.9418395459652,
"count": 7
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1707839025",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1707839662"
},
"total": 637.1006291660001,
"count": 1,
"self": 0.45747478500061334,
"children": {
"run_training.setup": {
"total": 0.06607223499986503,
"count": 1,
"self": 0.06607223499986503
},
"TrainerController.start_learning": {
"total": 636.5770821459996,
"count": 1,
"self": 0.4658599350264012,
"children": {
"TrainerController._reset_env": {
"total": 4.658360471999913,
"count": 1,
"self": 4.658360471999913
},
"TrainerController.advance": {
"total": 631.2683456999735,
"count": 13982,
"self": 0.5772789168800045,
"children": {
"env_step": {
"total": 407.08119793506785,
"count": 13982,
"self": 371.79794930504977,
"children": {
"SubprocessEnvManager._take_step": {
"total": 34.96824750900032,
"count": 13982,
"self": 1.5096203989833157,
"children": {
"TorchPolicy.evaluate": {
"total": 33.458627110017005,
"count": 13887,
"self": 33.458627110017005
}
}
},
"workers": {
"total": 0.3150011210177581,
"count": 13981,
"self": 0.0,
"children": {
"worker_root": {
"total": 635.2425472999812,
"count": 13981,
"is_parallel": true,
"self": 299.81383496498665,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0030181719998836343,
"count": 1,
"is_parallel": true,
"self": 0.0010439939997013425,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019741780001822917,
"count": 8,
"is_parallel": true,
"self": 0.0019741780001822917
}
}
},
"UnityEnvironment.step": {
"total": 0.06944988599980206,
"count": 1,
"is_parallel": true,
"self": 0.0006752090002919431,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00036826199993811315,
"count": 1,
"is_parallel": true,
"self": 0.00036826199993811315
},
"communicator.exchange": {
"total": 0.06634615199982363,
"count": 1,
"is_parallel": true,
"self": 0.06634615199982363
},
"steps_from_proto": {
"total": 0.0020602629997483746,
"count": 1,
"is_parallel": true,
"self": 0.0004075410006407765,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001652721999107598,
"count": 8,
"is_parallel": true,
"self": 0.001652721999107598
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 335.42871233499454,
"count": 13980,
"is_parallel": true,
"self": 10.353858517044046,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.201977781960977,
"count": 13980,
"is_parallel": true,
"self": 6.201977781960977
},
"communicator.exchange": {
"total": 290.82689413601565,
"count": 13980,
"is_parallel": true,
"self": 290.82689413601565
},
"steps_from_proto": {
"total": 28.045981899973867,
"count": 13980,
"is_parallel": true,
"self": 5.958044389975839,
"children": {
"_process_rank_one_or_two_observation": {
"total": 22.087937509998028,
"count": 111840,
"is_parallel": true,
"self": 22.087937509998028
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 223.6098688480256,
"count": 13981,
"self": 0.8528804820275582,
"children": {
"process_trajectory": {
"total": 33.42286806100037,
"count": 13981,
"self": 33.42286806100037
},
"_update_policy": {
"total": 189.33412030499767,
"count": 93,
"self": 76.86337213900197,
"children": {
"TorchPPOOptimizer.update": {
"total": 112.4707481659957,
"count": 5061,
"self": 112.4707481659957
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.8430000636726618e-06,
"count": 1,
"self": 1.8430000636726618e-06
},
"TrainerController._save_models": {
"total": 0.1845141959997818,
"count": 1,
"self": 0.0034200199997940217,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18109417599998778,
"count": 1,
"self": 0.18109417599998778
}
}
}
}
}
}
}