{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5357601642608643,
"min": 0.5088276267051697,
"max": 1.4522984027862549,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16072.8046875,
"min": 15240.4052734375,
"max": 44056.92578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989873.0,
"min": 29952.0,
"max": 989873.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989873.0,
"min": 29952.0,
"max": 989873.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0071153221651911736,
"min": -0.10672230273485184,
"max": 0.012407811358571053,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.7290233373641968,
"min": -25.613351821899414,
"max": 3.0399138927459717,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03892119601368904,
"min": 0.033085934817790985,
"max": 0.40898939967155457,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.457850456237793,
"min": 7.973710060119629,
"max": 98.56644439697266,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06697644244436649,
"min": 0.0654237366384006,
"max": 0.07403246914213138,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0046466366654974,
"min": 0.5053428415973114,
"max": 1.0357415643172547,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0037623612718520457,
"min": 0.0007707271572960966,
"max": 0.015784408869798986,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.056435419077780685,
"min": 0.010019453044849256,
"max": 0.11049086208859289,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.497017501026671e-06,
"min": 7.497017501026671e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011245526251540006,
"min": 0.00011245526251540006,
"max": 0.0036328360890546998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249897333333338,
"min": 0.10249897333333338,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5374846000000006,
"min": 1.3886848,
"max": 2.6109453,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002596474360000001,
"min": 0.0002596474360000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038947115400000017,
"min": 0.0038947115400000017,
"max": 0.12111343547000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.038474105298519135,
"min": 0.038474105298519135,
"max": 0.7031012177467346,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.5771116018295288,
"min": 0.5491717457771301,
"max": 4.921708583831787,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 876.8611111111111,
"min": 840.5714285714286,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31567.0,
"min": 15984.0,
"max": 33214.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.2661833701034387,
"min": -1.0000000521540642,
"max": -0.0980971872806549,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -9.582601323723793,
"min": -29.94000168889761,
"max": -3.4334015548229218,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.2661833701034387,
"min": -1.0000000521540642,
"max": -0.0980971872806549,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -9.582601323723793,
"min": -29.94000168889761,
"max": -3.4334015548229218,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.3352287486777641,
"min": 0.3352287486777641,
"max": 15.321282395161688,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 12.068234952399507,
"min": 10.928652231115848,
"max": 245.140518322587,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675938264",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675940965"
},
"total": 2700.594042652,
"count": 1,
"self": 0.5600444410001728,
"children": {
"run_training.setup": {
"total": 0.12836563799999112,
"count": 1,
"self": 0.12836563799999112
},
"TrainerController.start_learning": {
"total": 2699.905632573,
"count": 1,
"self": 1.9988755669746752,
"children": {
"TrainerController._reset_env": {
"total": 10.946847110000022,
"count": 1,
"self": 10.946847110000022
},
"TrainerController.advance": {
"total": 2686.8629812440254,
"count": 63192,
"self": 2.256882107024012,
"children": {
"env_step": {
"total": 1793.1532028809709,
"count": 63192,
"self": 1635.5317727769334,
"children": {
"SubprocessEnvManager._take_step": {
"total": 156.3423407440314,
"count": 63192,
"self": 5.985722934967612,
"children": {
"TorchPolicy.evaluate": {
"total": 150.35661780906378,
"count": 62554,
"self": 50.59869658700245,
"children": {
"TorchPolicy.sample_actions": {
"total": 99.75792122206133,
"count": 62554,
"self": 99.75792122206133
}
}
}
}
},
"workers": {
"total": 1.2790893600060826,
"count": 63192,
"self": 0.0,
"children": {
"worker_root": {
"total": 2692.624373085972,
"count": 63192,
"is_parallel": true,
"self": 1212.6660048139906,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.009556531000043833,
"count": 1,
"is_parallel": true,
"self": 0.0028904130000455552,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006666117999998278,
"count": 8,
"is_parallel": true,
"self": 0.006666117999998278
}
}
},
"UnityEnvironment.step": {
"total": 0.10855123900000763,
"count": 1,
"is_parallel": true,
"self": 0.0006017640001800828,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000524024999890571,
"count": 1,
"is_parallel": true,
"self": 0.000524024999890571
},
"communicator.exchange": {
"total": 0.10544823099996847,
"count": 1,
"is_parallel": true,
"self": 0.10544823099996847
},
"steps_from_proto": {
"total": 0.0019772189999684997,
"count": 1,
"is_parallel": true,
"self": 0.0005828700000165554,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013943489999519443,
"count": 8,
"is_parallel": true,
"self": 0.0013943489999519443
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1479.9583682719813,
"count": 63191,
"is_parallel": true,
"self": 40.12069666586535,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.931716067026287,
"count": 63191,
"is_parallel": true,
"self": 27.931716067026287
},
"communicator.exchange": {
"total": 1295.518891534015,
"count": 63191,
"is_parallel": true,
"self": 1295.518891534015
},
"steps_from_proto": {
"total": 116.38706400507476,
"count": 63191,
"is_parallel": true,
"self": 29.027353441178207,
"children": {
"_process_rank_one_or_two_observation": {
"total": 87.35971056389656,
"count": 505528,
"is_parallel": true,
"self": 87.35971056389656
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 891.4528962560306,
"count": 63192,
"self": 3.9004900770332824,
"children": {
"process_trajectory": {
"total": 190.327453059993,
"count": 63192,
"self": 190.10622631199305,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22122674799993547,
"count": 2,
"self": 0.22122674799993547
}
}
},
"_update_policy": {
"total": 697.2249531190043,
"count": 447,
"self": 269.6145207270274,
"children": {
"TorchPPOOptimizer.update": {
"total": 427.61043239197693,
"count": 22815,
"self": 427.61043239197693
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.619998309062794e-07,
"count": 1,
"self": 9.619998309062794e-07
},
"TrainerController._save_models": {
"total": 0.09692769000002954,
"count": 1,
"self": 0.00163786999974036,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09528982000028918,
"count": 1,
"self": 0.09528982000028918
}
}
}
}
}
}
}