{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.45897483825683594,
"min": 0.4388624429702759,
"max": 1.4322712421417236,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13805.962890625,
"min": 13116.720703125,
"max": 43449.37890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989904.0,
"min": 29952.0,
"max": 989904.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989904.0,
"min": 29952.0,
"max": 989904.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5238503813743591,
"min": -0.0890401229262352,
"max": 0.547838568687439,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 145.63040161132812,
"min": -21.36962890625,
"max": 149.5599365234375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.01164961513131857,
"min": -0.016367048025131226,
"max": 0.29524579644203186,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.238593101501465,
"min": -4.337267875671387,
"max": 70.85899353027344,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07135808768396688,
"min": 0.06516575438321988,
"max": 0.07702395185884219,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9990132275755363,
"min": 0.5391676630118953,
"max": 1.0387658922506187,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016113446517487143,
"min": 0.000224180416215506,
"max": 0.016509126577321218,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22558825124482002,
"min": 0.002465984578370566,
"max": 0.23112777208249705,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.738747420449999e-06,
"min": 7.738747420449999e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010834246388629998,
"min": 0.00010834246388629998,
"max": 0.0033831482722840004,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257955,
"min": 0.10257955,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4361137000000002,
"min": 1.3886848,
"max": 2.5277160000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000267697045,
"min": 0.000267697045,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00374775863,
"min": 0.00374775863,
"max": 0.1127988284,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01015374343842268,
"min": 0.009028873406350613,
"max": 0.41332873702049255,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14215241372585297,
"min": 0.12640422582626343,
"max": 2.893301248550415,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 382.6585365853659,
"min": 348.84705882352944,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31378.0,
"min": 15984.0,
"max": 32734.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5685438856300784,
"min": -1.0000000521540642,
"max": 1.6249011848346298,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 128.62059862166643,
"min": -31.99760165810585,
"max": 134.86679834127426,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5685438856300784,
"min": -1.0000000521540642,
"max": 1.6249011848346298,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 128.62059862166643,
"min": -31.99760165810585,
"max": 134.86679834127426,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03942516004196766,
"min": 0.03411839867844452,
"max": 9.117295523174107,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.232863123441348,
"min": 2.80491226221784,
"max": 145.8767283707857,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685779323",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685781424"
},
"total": 2101.311731916,
"count": 1,
"self": 0.424536055000317,
"children": {
"run_training.setup": {
"total": 0.06497518800006219,
"count": 1,
"self": 0.06497518800006219
},
"TrainerController.start_learning": {
"total": 2100.8222206729997,
"count": 1,
"self": 1.2386134149824102,
"children": {
"TrainerController._reset_env": {
"total": 4.201419610999892,
"count": 1,
"self": 4.201419610999892
},
"TrainerController.advance": {
"total": 2095.2901361620175,
"count": 63797,
"self": 1.236816204859224,
"children": {
"env_step": {
"total": 1492.819433318051,
"count": 63797,
"self": 1390.6769146050494,
"children": {
"SubprocessEnvManager._take_step": {
"total": 101.42541254998332,
"count": 63797,
"self": 4.488536998977224,
"children": {
"TorchPolicy.evaluate": {
"total": 96.9368755510061,
"count": 62556,
"self": 96.9368755510061
}
}
},
"workers": {
"total": 0.7171061630183431,
"count": 63797,
"self": 0.0,
"children": {
"worker_root": {
"total": 2096.673262696955,
"count": 63797,
"is_parallel": true,
"self": 809.2304782108936,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0026986090001628327,
"count": 1,
"is_parallel": true,
"self": 0.0008427770001162571,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018558320000465756,
"count": 8,
"is_parallel": true,
"self": 0.0018558320000465756
}
}
},
"UnityEnvironment.step": {
"total": 0.04546839799991176,
"count": 1,
"is_parallel": true,
"self": 0.0005726100000629231,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005045419998168654,
"count": 1,
"is_parallel": true,
"self": 0.0005045419998168654
},
"communicator.exchange": {
"total": 0.042696970999941186,
"count": 1,
"is_parallel": true,
"self": 0.042696970999941186
},
"steps_from_proto": {
"total": 0.0016942750000907836,
"count": 1,
"is_parallel": true,
"self": 0.0003514720001476235,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00134280299994316,
"count": 8,
"is_parallel": true,
"self": 0.00134280299994316
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1287.4427844860616,
"count": 63796,
"is_parallel": true,
"self": 30.547447783971847,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.88210380308942,
"count": 63796,
"is_parallel": true,
"self": 21.88210380308942
},
"communicator.exchange": {
"total": 1141.4249341630195,
"count": 63796,
"is_parallel": true,
"self": 1141.4249341630195
},
"steps_from_proto": {
"total": 93.58829873598074,
"count": 63796,
"is_parallel": true,
"self": 18.84870350069764,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.7395952352831,
"count": 510368,
"is_parallel": true,
"self": 74.7395952352831
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 601.2338866391071,
"count": 63797,
"self": 2.3782388590598202,
"children": {
"process_trajectory": {
"total": 101.91785501704703,
"count": 63797,
"self": 101.65958808204664,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25826693500039255,
"count": 2,
"self": 0.25826693500039255
}
}
},
"_update_policy": {
"total": 496.9377927630003,
"count": 447,
"self": 318.1388946690058,
"children": {
"TorchPPOOptimizer.update": {
"total": 178.79889809399447,
"count": 22821,
"self": 178.79889809399447
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1279998943791725e-06,
"count": 1,
"self": 1.1279998943791725e-06
},
"TrainerController._save_models": {
"total": 0.09205035700006192,
"count": 1,
"self": 0.0014512180000565422,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09059913900000538,
"count": 1,
"self": 0.09059913900000538
}
}
}
}
}
}
}
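
The JSON above is the complete timers log as written by the ML-Agents trainer: a "gauges" map of per-metric summaries (value/min/max/count), a "metadata" block describing the run, and a hierarchical timer tree rooted at "root" with per-node total/self/count timings. As a minimal sketch (not part of the original log), the snippet below shows one way to load and summarize such a file using only Python's standard json module; the local filename "timers.json" is an assumption about where the file is saved.

# Minimal sketch: load and summarize an ML-Agents timers.json.
# Assumes the log shown above is saved locally as "timers.json".
import json

with open("timers.json") as f:
    log = json.load(f)

# Each gauge records the latest value plus min/max and a sample count.
for name, gauge in log["gauges"].items():
    print(f"{name}: value={gauge['value']:.4g} "
          f"(min={gauge['min']:.4g}, max={gauge['max']:.4g}, n={gauge['count']})")

# The top-level object is itself the root timer node; walk its
# "children" tree and report total and self time per block.
def walk(name, node, depth=0):
    print(f"{'  ' * depth}{name}: total={node.get('total', 0.0):.2f}s "
          f"self={node.get('self', 0.0):.2f}s count={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk(log["name"], log)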