ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.1264774203300476,
"min": 0.12133873254060745,
"max": 1.469253420829773,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 3784.204345703125,
"min": 3669.283203125,
"max": 44571.2734375,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999906.0,
"min": 29986.0,
"max": 2999906.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999906.0,
"min": 29986.0,
"max": 2999906.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.82927405834198,
"min": -0.07572072744369507,
"max": 0.9394945502281189,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 250.44076538085938,
"min": -18.021533966064453,
"max": 295.00128173828125,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.006171045824885368,
"min": -0.019276633858680725,
"max": 0.22240948677062988,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.8636558055877686,
"min": -5.956480026245117,
"max": 52.93345642089844,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0692730050572815,
"min": 0.06380657659869433,
"max": 0.07588517239185673,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0390950758592226,
"min": 0.5608416392517092,
"max": 1.0846602109183245,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015756846436609826,
"min": 0.0003106717606486122,
"max": 0.017262691297504466,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23635269654914737,
"min": 0.003728061127783346,
"max": 0.24167767816506253,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5075461641844428e-06,
"min": 1.5075461641844428e-06,
"max": 0.00029825500058166664,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.2613192462766642e-05,
"min": 2.2613192462766642e-05,
"max": 0.0038843992052002988,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10050248222222224,
"min": 0.10050248222222224,
"max": 0.19941833333333336,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5075372333333337,
"min": 1.4213532666666668,
"max": 2.752449433333333,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.019797399999995e-05,
"min": 6.019797399999995e-05,
"max": 0.009941891500000001,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0009029696099999993,
"min": 0.0009029696099999993,
"max": 0.12949049003000002,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0068528493866324425,
"min": 0.006573980208486319,
"max": 0.4045696258544922,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10279273986816406,
"min": 0.09203572571277618,
"max": 3.2365570068359375,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 216.3985507246377,
"min": 190.873417721519,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29863.0,
"min": 17424.0,
"max": 32499.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7836014364940533,
"min": -0.9999742459866309,
"max": 1.8086751463117114,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 246.13699823617935,
"min": -30.999201625585556,
"max": 283.9619979709387,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7836014364940533,
"min": -0.9999742459866309,
"max": 1.8086751463117114,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 246.13699823617935,
"min": -30.999201625585556,
"max": 283.9619979709387,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.015425271291346491,
"min": 0.013571335901013148,
"max": 8.68073938952552,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.128687438205816,
"min": 1.9685758271371014,
"max": 156.25330901145935,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690692381",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690700057"
},
"total": 7676.823282195,
"count": 1,
"self": 0.487801053000112,
"children": {
"run_training.setup": {
"total": 0.030912136999859285,
"count": 1,
"self": 0.030912136999859285
},
"TrainerController.start_learning": {
"total": 7676.304569004999,
"count": 1,
"self": 4.2867465459448795,
"children": {
"TrainerController._reset_env": {
"total": 4.08383707400003,
"count": 1,
"self": 4.08383707400003
},
"TrainerController.advance": {
"total": 7667.823578430056,
"count": 196063,
"self": 4.122779525214355,
"children": {
"env_step": {
"total": 5652.029589080801,
"count": 196063,
"self": 5320.917782719641,
"children": {
"SubprocessEnvManager._take_step": {
"total": 328.57825031113384,
"count": 196063,
"self": 14.379911542333502,
"children": {
"TorchPolicy.evaluate": {
"total": 314.19833876880034,
"count": 187552,
"self": 314.19833876880034
}
}
},
"workers": {
"total": 2.533556050026391,
"count": 196063,
"self": 0.0,
"children": {
"worker_root": {
"total": 7658.966845051986,
"count": 196063,
"is_parallel": true,
"self": 2695.4243037460483,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016969280000012077,
"count": 1,
"is_parallel": true,
"self": 0.0005536460000712395,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011432819999299682,
"count": 8,
"is_parallel": true,
"self": 0.0011432819999299682
}
}
},
"UnityEnvironment.step": {
"total": 0.05080075599994416,
"count": 1,
"is_parallel": true,
"self": 0.0005387559999689984,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005018159999963245,
"count": 1,
"is_parallel": true,
"self": 0.0005018159999963245
},
"communicator.exchange": {
"total": 0.04797005299997181,
"count": 1,
"is_parallel": true,
"self": 0.04797005299997181
},
"steps_from_proto": {
"total": 0.0017901310000070225,
"count": 1,
"is_parallel": true,
"self": 0.00034264900000380294,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014474820000032196,
"count": 8,
"is_parallel": true,
"self": 0.0014474820000032196
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4963.542541305937,
"count": 196062,
"is_parallel": true,
"self": 103.71747368603246,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 68.94208805377593,
"count": 196062,
"is_parallel": true,
"self": 68.94208805377593
},
"communicator.exchange": {
"total": 4481.419380136015,
"count": 196062,
"is_parallel": true,
"self": 4481.419380136015
},
"steps_from_proto": {
"total": 309.4635994301145,
"count": 196062,
"is_parallel": true,
"self": 62.81913088212218,
"children": {
"_process_rank_one_or_two_observation": {
"total": 246.64446854799235,
"count": 1568496,
"is_parallel": true,
"self": 246.64446854799235
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2011.6712098240403,
"count": 196063,
"self": 8.25805797199314,
"children": {
"process_trajectory": {
"total": 347.73920588205806,
"count": 196063,
"self": 347.07214390705917,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6670619749988873,
"count": 6,
"self": 0.6670619749988873
}
}
},
"_update_policy": {
"total": 1655.673945969989,
"count": 1399,
"self": 1077.678762597906,
"children": {
"TorchPPOOptimizer.update": {
"total": 577.9951833720831,
"count": 68358,
"self": 577.9951833720831
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.78999344422482e-07,
"count": 1,
"self": 8.78999344422482e-07
},
"TrainerController._save_models": {
"total": 0.11040607599898067,
"count": 1,
"self": 0.0014532119985233294,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10895286400045734,
"count": 1,
"self": 0.10895286400045734
}
}
}
}
}
}
}
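
The snippet below is a minimal, illustrative sketch (not part of the logged output). It assumes the JSON above is saved at run_logs/timers.json and relies only on keys visible in the file ("gauges", "total", "count", "children") to print the recorded training metrics and a breakdown of where wall-clock time was spent.

import json

# Path is an assumption based on the repository layout shown above.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the latest value plus min/max over `count` updates.
print("Gauges:")
for name, gauge in timers["gauges"].items():
    print(f"  {name}: value={gauge['value']:.4g} "
          f"(min={gauge['min']:.4g}, max={gauge['max']:.4g}, "
          f"count={gauge['count']})")

# Timer nodes nest under "children"; walk the tree and report totals.
def report(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s "
          f"over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        report(child, child_name, depth + 1)

print("Timer tree:")
report(timers)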