{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4012230634689331,
"min": 0.38882139325141907,
"max": 1.3915460109710693,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12011.013671875,
"min": 11689.662109375,
"max": 42213.94140625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989936.0,
"min": 29952.0,
"max": 989936.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989936.0,
"min": 29952.0,
"max": 989936.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6269130706787109,
"min": -0.07648507505655289,
"max": 0.6269130706787109,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 178.04331970214844,
"min": -18.432903289794922,
"max": 178.04331970214844,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0006848539924249053,
"min": -0.02524637058377266,
"max": 0.5005385279655457,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.19449853897094727,
"min": -6.665041923522949,
"max": 118.62762451171875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06549766080635083,
"min": 0.06399838118685035,
"max": 0.07307493575736587,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9824649120952624,
"min": 0.49871460198181894,
"max": 1.0921134987546988,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014147582980028042,
"min": 0.0006362919493185139,
"max": 0.018004458422089146,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21221374470042062,
"min": 0.008908087290459193,
"max": 0.24051189550664276,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.456837514420003e-06,
"min": 7.456837514420003e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011185256271630004,
"min": 0.00011185256271630004,
"max": 0.0035081180306273997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248557999999999,
"min": 0.10248557999999999,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5372837,
"min": 1.3886848,
"max": 2.5693726000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025830944200000005,
"min": 0.00025830944200000005,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003874641630000001,
"min": 0.003874641630000001,
"max": 0.11696032273999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009987115859985352,
"min": 0.009987115859985352,
"max": 0.6365255117416382,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14980673789978027,
"min": 0.1403665393590927,
"max": 4.455678462982178,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 324.3645833333333,
"min": 302.18390804597703,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31139.0,
"min": 15984.0,
"max": 33481.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6127810413900174,
"min": -1.0000000521540642,
"max": 1.6765408910472284,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 153.21419893205166,
"min": -29.7412016838789,
"max": 158.5165982618928,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6127810413900174,
"min": -1.0000000521540642,
"max": 1.6765408910472284,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 153.21419893205166,
"min": -29.7412016838789,
"max": 158.5165982618928,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03340763466349975,
"min": 0.033007459157653844,
"max": 14.277171859517694,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1737252930324757,
"min": 2.9046564058735385,
"max": 228.4347497522831,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691758483",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691760922"
},
"total": 2439.447275641999,
"count": 1,
"self": 0.9483664049994331,
"children": {
"run_training.setup": {
"total": 0.04048803699970449,
"count": 1,
"self": 0.04048803699970449
},
"TrainerController.start_learning": {
"total": 2438.4584212,
"count": 1,
"self": 1.7530878519728503,
"children": {
"TrainerController._reset_env": {
"total": 4.525491329000033,
"count": 1,
"self": 4.525491329000033
},
"TrainerController.advance": {
"total": 2432.020281343027,
"count": 63963,
"self": 1.7752535971121688,
"children": {
"env_step": {
"total": 1739.875731707869,
"count": 63963,
"self": 1612.287845370729,
"children": {
"SubprocessEnvManager._take_step": {
"total": 126.54448699816203,
"count": 63963,
"self": 5.214587332099654,
"children": {
"TorchPolicy.evaluate": {
"total": 121.32989966606237,
"count": 62553,
"self": 121.32989966606237
}
}
},
"workers": {
"total": 1.0433993389779062,
"count": 63963,
"self": 0.0,
"children": {
"worker_root": {
"total": 2432.1782054769455,
"count": 63963,
"is_parallel": true,
"self": 950.5988141699254,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018218929999420652,
"count": 1,
"is_parallel": true,
"self": 0.0005905849993723677,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012313080005696975,
"count": 8,
"is_parallel": true,
"self": 0.0012313080005696975
}
}
},
"UnityEnvironment.step": {
"total": 0.07802595000066503,
"count": 1,
"is_parallel": true,
"self": 0.0005818450008518994,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005028060004406143,
"count": 1,
"is_parallel": true,
"self": 0.0005028060004406143
},
"communicator.exchange": {
"total": 0.07502761599971564,
"count": 1,
"is_parallel": true,
"self": 0.07502761599971564
},
"steps_from_proto": {
"total": 0.001913682999656885,
"count": 1,
"is_parallel": true,
"self": 0.00034027099991362775,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015734119997432572,
"count": 8,
"is_parallel": true,
"self": 0.0015734119997432572
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1481.5793913070202,
"count": 63962,
"is_parallel": true,
"self": 36.44222396619534,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.962392915007513,
"count": 63962,
"is_parallel": true,
"self": 23.962392915007513
},
"communicator.exchange": {
"total": 1308.7341079130456,
"count": 63962,
"is_parallel": true,
"self": 1308.7341079130456
},
"steps_from_proto": {
"total": 112.44066651277171,
"count": 63962,
"is_parallel": true,
"self": 22.58210850327123,
"children": {
"_process_rank_one_or_two_observation": {
"total": 89.85855800950048,
"count": 511696,
"is_parallel": true,
"self": 89.85855800950048
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 690.3692960380458,
"count": 63963,
"self": 3.09787722515739,
"children": {
"process_trajectory": {
"total": 116.6892435258851,
"count": 63963,
"self": 116.42510706688608,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26413645899901894,
"count": 2,
"self": 0.26413645899901894
}
}
},
"_update_policy": {
"total": 570.5821752870033,
"count": 453,
"self": 368.6402027899967,
"children": {
"TorchPPOOptimizer.update": {
"total": 201.94197249700665,
"count": 22815,
"self": 201.94197249700665
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5170007827691734e-06,
"count": 1,
"self": 1.5170007827691734e-06
},
"TrainerController._save_models": {
"total": 0.1595591589994001,
"count": 1,
"self": 0.002012392998949508,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1575467660004506,
"count": 1,
"self": 0.1575467660004506
}
}
}
}
}
}
}