{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3927719295024872,
"min": 0.3345213234424591,
"max": 1.5908561944961548,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11751.736328125,
"min": 9982.1162109375,
"max": 48260.21484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989969.0,
"min": 29952.0,
"max": 989969.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989969.0,
"min": 29952.0,
"max": 989969.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.06096706911921501,
"min": -0.13392141461372375,
"max": -0.058662671595811844,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -14.693063735961914,
"min": -31.739376068115234,
"max": -14.267111778259277,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.030788179486989975,
"min": 0.026850635185837746,
"max": 0.4137192666530609,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.419951438903809,
"min": 6.47100305557251,
"max": 99.70634460449219,
"count": 33
},
"Pyramids.Policy.CuriosityValueEstimate.mean": {
"value": 0.5742224454879761,
"min": 0.5742224454879761,
"max": 3.045016050338745,
"count": 33
},
"Pyramids.Policy.CuriosityValueEstimate.sum": {
"value": 138.38760375976562,
"min": 136.74951171875,
"max": 733.848876953125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.02909161233032743,
"min": 0.028808268814140725,
"max": 0.04354854062143149,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.14545806165163716,
"min": 0.136160298956163,
"max": 0.21774270310715746,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.001329722151470681,
"min": 0.0004763982726162243,
"max": 0.040207329172655154,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.006648610757353405,
"min": 0.0023819913630811216,
"max": 0.16082931669062062,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.599277466940001e-06,
"min": 7.599277466940001e-06,
"max": 0.00029544960151680006,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 3.7996387334700006e-05,
"min": 3.7996387334700006e-05,
"max": 0.0016110864629711996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253306,
"min": 0.10253306,
"max": 0.19848320000000003,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.5126653,
"min": 0.5126653,
"max": 1.1370288000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 3.507729400000001e-05,
"min": 3.507729400000001e-05,
"max": 0.0009849836800000001,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00017538647000000002,
"min": 0.00017538647000000002,
"max": 0.00537658512,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.026291707530617714,
"min": 0.026291707530617714,
"max": 0.5129854679107666,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13145853579044342,
"min": 0.13145853579044342,
"max": 2.3161582946777344,
"count": 33
},
"Pyramids.Losses.CuriosityForwardLoss.mean": {
"value": 0.054038520380854604,
"min": 0.054038520380854604,
"max": 0.5813339463519779,
"count": 33
},
"Pyramids.Losses.CuriosityForwardLoss.sum": {
"value": 0.270192601904273,
"min": 0.270192601904273,
"max": 2.3253357854079115,
"count": 33
},
"Pyramids.Losses.CuriosityInverseLoss.mean": {
"value": 0.11195765189826487,
"min": 0.10988195906910632,
"max": 1.0550547594373876,
"count": 33
},
"Pyramids.Losses.CuriosityInverseLoss.sum": {
"value": 0.5597882594913244,
"min": 0.5597882594913244,
"max": 4.22021903774955,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 964.8064516129032,
"min": 804.5,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29909.0,
"min": 15984.0,
"max": 33199.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.7718323077405652,
"min": -1.0000000521540642,
"max": -0.09919416334699183,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -23.926801539957523,
"min": -31.996801659464836,
"max": -3.372601553797722,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.7718323077405652,
"min": -1.0000000521540642,
"max": -0.09919416334699183,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -23.926801539957523,
"min": -31.996801659464836,
"max": -3.372601553797722,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.2740312070555745,
"min": 0.25448191026225686,
"max": 6.806104302406311,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 8.494967418722808,
"min": 7.634457307867706,
"max": 175.9811226129532,
"count": 33
},
"Pyramids.Policy.CuriosityReward.mean": {
"value": 5.350721614014718,
"min": 5.3234859297672905,
"max": 42.04442534727209,
"count": 33
},
"Pyramids.Policy.CuriosityReward.sum": {
"value": 165.87237003445625,
"min": 159.70457789301872,
"max": 1429.510461807251,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676560841",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training 2 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676563000"
},
"total": 2159.1076753690004,
"count": 1,
"self": 0.47553678899930674,
"children": {
"run_training.setup": {
"total": 0.12349426800028596,
"count": 1,
"self": 0.12349426800028596
},
"TrainerController.start_learning": {
"total": 2158.508644312001,
"count": 1,
"self": 1.3994665169266227,
"children": {
"TrainerController._reset_env": {
"total": 7.210593030000382,
"count": 1,
"self": 7.210593030000382
},
"TrainerController.advance": {
"total": 2149.809942970074,
"count": 62930,
"self": 1.4435820270491604,
"children": {
"env_step": {
"total": 1445.1240899059467,
"count": 62930,
"self": 1329.0530054199216,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.2503089261254,
"count": 62930,
"self": 4.758791506123998,
"children": {
"TorchPolicy.evaluate": {
"total": 110.4915174200014,
"count": 62561,
"self": 36.89906582790354,
"children": {
"TorchPolicy.sample_actions": {
"total": 73.59245159209786,
"count": 62561,
"self": 73.59245159209786
}
}
}
}
},
"workers": {
"total": 0.82077555989963,
"count": 62930,
"self": 0.0,
"children": {
"worker_root": {
"total": 2153.7299986752214,
"count": 62930,
"is_parallel": true,
"self": 939.2318687921997,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020292749995860504,
"count": 1,
"is_parallel": true,
"self": 0.0007508389981012442,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012784360014848062,
"count": 8,
"is_parallel": true,
"self": 0.0012784360014848062
}
}
},
"UnityEnvironment.step": {
"total": 0.04860341199946561,
"count": 1,
"is_parallel": true,
"self": 0.0007584909990328015,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005181970000194269,
"count": 1,
"is_parallel": true,
"self": 0.0005181970000194269
},
"communicator.exchange": {
"total": 0.04565544300021429,
"count": 1,
"is_parallel": true,
"self": 0.04565544300021429
},
"steps_from_proto": {
"total": 0.0016712810001990874,
"count": 1,
"is_parallel": true,
"self": 0.00040697900021768874,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012643019999813987,
"count": 8,
"is_parallel": true,
"self": 0.0012643019999813987
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1214.4981298830216,
"count": 62929,
"is_parallel": true,
"self": 31.409194018135167,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.935441597883255,
"count": 62929,
"is_parallel": true,
"self": 23.935441597883255
},
"communicator.exchange": {
"total": 1055.1223136239896,
"count": 62929,
"is_parallel": true,
"self": 1055.1223136239896
},
"steps_from_proto": {
"total": 104.03118064301361,
"count": 62929,
"is_parallel": true,
"self": 23.448206617877076,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.58297402513654,
"count": 503432,
"is_parallel": true,
"self": 80.58297402513654
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 703.2422710370784,
"count": 62930,
"self": 2.2271853410020412,
"children": {
"process_trajectory": {
"total": 187.03582389406893,
"count": 62930,
"self": 186.76798699206847,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26783690200045385,
"count": 2,
"self": 0.26783690200045385
}
}
},
"_update_policy": {
"total": 513.9792618020074,
"count": 180,
"self": 241.91819204698822,
"children": {
"TorchPPOOptimizer.update": {
"total": 272.06106975501916,
"count": 5679,
"self": 272.06106975501916
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.369996405439451e-07,
"count": 1,
"self": 9.369996405439451e-07
},
"TrainerController._save_models": {
"total": 0.08864085799996246,
"count": 1,
"self": 0.001518477999525203,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08712238000043726,
"count": 1,
"self": 0.08712238000043726
}
}
}
}
}
}
}