{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.40999701619148254,
"min": 0.39939722418785095,
"max": 1.5147372484207153,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12365.509765625,
"min": 11969.1357421875,
"max": 45951.0703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989983.0,
"min": 29952.0,
"max": 989983.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989983.0,
"min": 29952.0,
"max": 989983.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.43793773651123047,
"min": -0.26306644082069397,
"max": 0.49982985854148865,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 117.80525207519531,
"min": -62.34674835205078,
"max": 136.45355224609375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.03153499960899353,
"min": -0.03487278148531914,
"max": 0.2576129734516144,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -8.482914924621582,
"min": -9.10179615020752,
"max": 62.342342376708984,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0678381564260538,
"min": 0.06415906015272234,
"max": 0.0734987230508772,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9497341899647532,
"min": 0.4953659168740964,
"max": 1.0542711167557477,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01368161584694116,
"min": 0.0010486669678227043,
"max": 0.015195393866936984,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19154262185717624,
"min": 0.012584003613872453,
"max": 0.21273551413711778,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.395247534950003e-06,
"min": 7.395247534950003e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010353346548930005,
"min": 0.00010353346548930005,
"max": 0.0035059682313439994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246505000000003,
"min": 0.10246505000000003,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4345107000000004,
"min": 1.3886848,
"max": 2.568656,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002562584950000001,
"min": 0.0002562584950000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003587618930000002,
"min": 0.003587618930000002,
"max": 0.11688873439999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011266748420894146,
"min": 0.011266748420894146,
"max": 0.37811270356178284,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1577344834804535,
"min": 0.1577344834804535,
"max": 2.6467888355255127,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 410.3,
"min": 367.56976744186045,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28721.0,
"min": 15984.0,
"max": 32676.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.478357720333086,
"min": -1.0000000521540642,
"max": 1.511655975182851,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 104.9633981436491,
"min": -29.94820163398981,
"max": 122.72139853984118,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.478357720333086,
"min": -1.0000000521540642,
"max": 1.511655975182851,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 104.9633981436491,
"min": -29.94820163398981,
"max": 122.72139853984118,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04724441887523887,
"min": 0.04418748565127745,
"max": 7.3193822633475065,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.35435374014196,
"min": 3.35435374014196,
"max": 117.1101162135601,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713901103",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713902714"
},
"total": 1610.7235986870005,
"count": 1,
"self": 0.3262232740012223,
"children": {
"run_training.setup": {
"total": 0.052066349999677186,
"count": 1,
"self": 0.052066349999677186
},
"TrainerController.start_learning": {
"total": 1610.3453090629996,
"count": 1,
"self": 1.213733819967274,
"children": {
"TrainerController._reset_env": {
"total": 2.0323906739999984,
"count": 1,
"self": 2.0323906739999984
},
"TrainerController.advance": {
"total": 1607.0201606620321,
"count": 63819,
"self": 1.1837664341323944,
"children": {
"env_step": {
"total": 1034.8799137969581,
"count": 63819,
"self": 918.8945612169464,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.23175521703934,
"count": 63819,
"self": 4.181860911012791,
"children": {
"TorchPolicy.evaluate": {
"total": 111.04989430602654,
"count": 62565,
"self": 111.04989430602654
}
}
},
"workers": {
"total": 0.753597362972414,
"count": 63819,
"self": 0.0,
"children": {
"worker_root": {
"total": 1608.3053119790452,
"count": 63819,
"is_parallel": true,
"self": 782.2504078910288,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021141610000086075,
"count": 1,
"is_parallel": true,
"self": 0.0006164050000734278,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014977559999351797,
"count": 8,
"is_parallel": true,
"self": 0.0014977559999351797
}
}
},
"UnityEnvironment.step": {
"total": 0.035098191000088264,
"count": 1,
"is_parallel": true,
"self": 0.0004190849999758939,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00029301399990799837,
"count": 1,
"is_parallel": true,
"self": 0.00029301399990799837
},
"communicator.exchange": {
"total": 0.03325862500014409,
"count": 1,
"is_parallel": true,
"self": 0.03325862500014409
},
"steps_from_proto": {
"total": 0.0011274670000602782,
"count": 1,
"is_parallel": true,
"self": 0.0002618920002532832,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000865574999806995,
"count": 8,
"is_parallel": true,
"self": 0.000865574999806995
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 826.0549040880164,
"count": 63818,
"is_parallel": true,
"self": 20.643056393986626,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.310551547000614,
"count": 63818,
"is_parallel": true,
"self": 13.310551547000614
},
"communicator.exchange": {
"total": 731.6305056419769,
"count": 63818,
"is_parallel": true,
"self": 731.6305056419769
},
"steps_from_proto": {
"total": 60.470790505052264,
"count": 63818,
"is_parallel": true,
"self": 13.045876576373757,
"children": {
"_process_rank_one_or_two_observation": {
"total": 47.42491392867851,
"count": 510544,
"is_parallel": true,
"self": 47.42491392867851
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 570.9564804309416,
"count": 63819,
"self": 2.4353573749376665,
"children": {
"process_trajectory": {
"total": 113.62291081901822,
"count": 63819,
"self": 113.43906171601793,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18384910300028423,
"count": 2,
"self": 0.18384910300028423
}
}
},
"_update_policy": {
"total": 454.8982122369857,
"count": 455,
"self": 269.5720326629935,
"children": {
"TorchPPOOptimizer.update": {
"total": 185.32617957399225,
"count": 22806,
"self": 185.32617957399225
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0690000635804608e-06,
"count": 1,
"self": 1.0690000635804608e-06
},
"TrainerController._save_models": {
"total": 0.0790228380001281,
"count": 1,
"self": 0.001402568999765208,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0776202690003629,
"count": 1,
"self": 0.0776202690003629
}
}
}
}
}
}
}