{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5105743408203125,
"min": 0.5105743408203125,
"max": 1.4153269529342651,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15366.24609375,
"min": 15366.24609375,
"max": 42935.359375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989920.0,
"min": 29952.0,
"max": 989920.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989920.0,
"min": 29952.0,
"max": 989920.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.38944900035858154,
"min": -0.1186157688498497,
"max": 0.4265997111797333,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 102.03563690185547,
"min": -28.586400985717773,
"max": 114.75532531738281,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0021599363535642624,
"min": -0.013654683716595173,
"max": 0.3989749252796173,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.5659033060073853,
"min": -3.6184911727905273,
"max": 95.75398254394531,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07017989390892797,
"min": 0.06558731864095621,
"max": 0.07477418569673414,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9825185147249916,
"min": 0.523419299877139,
"max": 1.0575968790685246,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015139511283147536,
"min": 0.00029311721278402707,
"max": 0.015512501832518592,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2119531579640655,
"min": 0.003224289340624298,
"max": 0.21717502565526028,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.38629039507857e-06,
"min": 7.38629039507857e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010340806553109998,
"min": 0.00010340806553109998,
"max": 0.003382871972376099,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246206428571429,
"min": 0.10246206428571429,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4344689000000002,
"min": 1.3886848,
"max": 2.5276239,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025596022214285707,
"min": 0.00025596022214285707,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003583443109999999,
"min": 0.003583443109999999,
"max": 0.11278962761,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01492005679756403,
"min": 0.01492005679756403,
"max": 0.49722275137901306,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20888079702854156,
"min": 0.20888079702854156,
"max": 3.4805593490600586,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 452.3636363636364,
"min": 413.2837837837838,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29856.0,
"min": 15984.0,
"max": 33582.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3354636131588258,
"min": -1.0000000521540642,
"max": 1.5056026822610482,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 88.1405984684825,
"min": -30.997601613402367,
"max": 111.41459848731756,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3354636131588258,
"min": -1.0000000521540642,
"max": 1.5056026822610482,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 88.1405984684825,
"min": -30.997601613402367,
"max": 111.41459848731756,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.07002738876833588,
"min": 0.0680472015944385,
"max": 10.237286988645792,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.621807658710168,
"min": 4.584234749956522,
"max": 163.79659181833267,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702881324",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702883365"
},
"total": 2041.207688415,
"count": 1,
"self": 0.6875162319997798,
"children": {
"run_training.setup": {
"total": 0.05083875499985879,
"count": 1,
"self": 0.05083875499985879
},
"TrainerController.start_learning": {
"total": 2040.4693334280003,
"count": 1,
"self": 1.2664978919792702,
"children": {
"TrainerController._reset_env": {
"total": 2.08179599600021,
"count": 1,
"self": 2.08179599600021
},
"TrainerController.advance": {
"total": 2036.9992384270208,
"count": 63477,
"self": 1.3579070339569626,
"children": {
"env_step": {
"total": 1398.5533217770694,
"count": 63477,
"self": 1272.947818342996,
"children": {
"SubprocessEnvManager._take_step": {
"total": 124.82659982002565,
"count": 63477,
"self": 4.599146297052357,
"children": {
"TorchPolicy.evaluate": {
"total": 120.22745352297329,
"count": 62565,
"self": 120.22745352297329
}
}
},
"workers": {
"total": 0.7789036140477492,
"count": 63477,
"self": 0.0,
"children": {
"worker_root": {
"total": 2035.672002071055,
"count": 63477,
"is_parallel": true,
"self": 876.9947957390425,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016633759998967435,
"count": 1,
"is_parallel": true,
"self": 0.0005098349997751939,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011535410001215496,
"count": 8,
"is_parallel": true,
"self": 0.0011535410001215496
}
}
},
"UnityEnvironment.step": {
"total": 0.052268015999970885,
"count": 1,
"is_parallel": true,
"self": 0.0006409050001821015,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004923209999105893,
"count": 1,
"is_parallel": true,
"self": 0.0004923209999105893
},
"communicator.exchange": {
"total": 0.04936148100000537,
"count": 1,
"is_parallel": true,
"self": 0.04936148100000537
},
"steps_from_proto": {
"total": 0.0017733089998728246,
"count": 1,
"is_parallel": true,
"self": 0.0003570699998363125,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014162390000365122,
"count": 8,
"is_parallel": true,
"self": 0.0014162390000365122
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1158.6772063320125,
"count": 63476,
"is_parallel": true,
"self": 34.51350880409109,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.371749653911365,
"count": 63476,
"is_parallel": true,
"self": 24.371749653911365
},
"communicator.exchange": {
"total": 1002.9668501379958,
"count": 63476,
"is_parallel": true,
"self": 1002.9668501379958
},
"steps_from_proto": {
"total": 96.82509773601419,
"count": 63476,
"is_parallel": true,
"self": 18.899482530899604,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.92561520511458,
"count": 507808,
"is_parallel": true,
"self": 77.92561520511458
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 637.0880096159945,
"count": 63477,
"self": 2.4892717900133903,
"children": {
"process_trajectory": {
"total": 125.79276093197791,
"count": 63477,
"self": 125.56745761197794,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22530331999996633,
"count": 2,
"self": 0.22530331999996633
}
}
},
"_update_policy": {
"total": 508.80597689400315,
"count": 444,
"self": 302.1853591020515,
"children": {
"TorchPPOOptimizer.update": {
"total": 206.62061779195164,
"count": 22851,
"self": 206.62061779195164
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5300001905416138e-06,
"count": 1,
"self": 1.5300001905416138e-06
},
"TrainerController._save_models": {
"total": 0.12179958299975624,
"count": 1,
"self": 0.0020343119995231973,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11976527100023304,
"count": 1,
"self": 0.11976527100023304
}
}
}
}
}
}
}