{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.28620773553848267,
"min": 0.28620773553848267,
"max": 1.3941848278045654,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8714.453125,
"min": 8714.453125,
"max": 42293.9921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989911.0,
"min": 29952.0,
"max": 989911.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989911.0,
"min": 29952.0,
"max": 989911.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5719218254089355,
"min": -0.14826145768165588,
"max": 0.6345453262329102,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 160.1381072998047,
"min": -36.02753448486328,
"max": 180.21087646484375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.015681233257055283,
"min": -0.004872321616858244,
"max": 0.342519074678421,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.390745162963867,
"min": -1.344760775566101,
"max": 81.17701721191406,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06947490652410558,
"min": 0.06514014818357351,
"max": 0.07274604728706287,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9726486913374781,
"min": 0.48159459436693003,
"max": 1.0720746292305798,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.018775865997581388,
"min": 0.000894337887281473,
"max": 0.018775865997581388,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.26286212396613945,
"min": 0.006260365210970311,
"max": 0.266711541118817,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.522447492550002e-06,
"min": 7.522447492550002e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010531426489570004,
"min": 0.00010531426489570004,
"max": 0.0036297100900966996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250745000000001,
"min": 0.10250745000000001,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351043,
"min": 1.3691136000000002,
"max": 2.6099033,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002604942550000001,
"min": 0.0002604942550000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003646919570000002,
"min": 0.003646919570000002,
"max": 0.12100933967,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008779570460319519,
"min": 0.008779570460319519,
"max": 0.4866790473461151,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12291398644447327,
"min": 0.12291398644447327,
"max": 3.4067533016204834,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 304.53921568627453,
"min": 299.28,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31063.0,
"min": 15984.0,
"max": 32799.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6534058641569287,
"min": -1.0000000521540642,
"max": 1.6843711160814638,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 168.64739814400673,
"min": -32.000001668930054,
"max": 168.64739814400673,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6534058641569287,
"min": -1.0000000521540642,
"max": 1.6843711160814638,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 168.64739814400673,
"min": -32.000001668930054,
"max": 168.64739814400673,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.027961951244637306,
"min": 0.027866449126811473,
"max": 9.934462127275765,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8521190269530052,
"min": 2.7587784635543358,
"max": 158.95139403641224,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688437116",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688439401"
},
"total": 2285.401040783,
"count": 1,
"self": 0.5743434069991054,
"children": {
"run_training.setup": {
"total": 0.03895458600072743,
"count": 1,
"self": 0.03895458600072743
},
"TrainerController.start_learning": {
"total": 2284.7877427900003,
"count": 1,
"self": 1.5547518311987005,
"children": {
"TrainerController._reset_env": {
"total": 4.099468727000385,
"count": 1,
"self": 4.099468727000385
},
"TrainerController.advance": {
"total": 2279.0367745638014,
"count": 64225,
"self": 1.5034287967519049,
"children": {
"env_step": {
"total": 1639.1270037220656,
"count": 64225,
"self": 1524.992937074091,
"children": {
"SubprocessEnvManager._take_step": {
"total": 113.25343436896947,
"count": 64225,
"self": 4.9004074259019035,
"children": {
"TorchPolicy.evaluate": {
"total": 108.35302694306756,
"count": 62577,
"self": 108.35302694306756
}
}
},
"workers": {
"total": 0.8806322790051127,
"count": 64225,
"self": 0.0,
"children": {
"worker_root": {
"total": 2279.406570759974,
"count": 64225,
"is_parallel": true,
"self": 871.6023004190502,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001737641000545409,
"count": 1,
"is_parallel": true,
"self": 0.0005288530010147952,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012087879995306139,
"count": 8,
"is_parallel": true,
"self": 0.0012087879995306139
}
}
},
"UnityEnvironment.step": {
"total": 0.04626485600056185,
"count": 1,
"is_parallel": true,
"self": 0.0005396070000642794,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048086399965541204,
"count": 1,
"is_parallel": true,
"self": 0.00048086399965541204
},
"communicator.exchange": {
"total": 0.04344958800083987,
"count": 1,
"is_parallel": true,
"self": 0.04344958800083987
},
"steps_from_proto": {
"total": 0.0017947970000022906,
"count": 1,
"is_parallel": true,
"self": 0.00035787800061370945,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014369189993885811,
"count": 8,
"is_parallel": true,
"self": 0.0014369189993885811
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1407.8042703409237,
"count": 64224,
"is_parallel": true,
"self": 33.37613834170497,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.74970432221926,
"count": 64224,
"is_parallel": true,
"self": 22.74970432221926
},
"communicator.exchange": {
"total": 1250.1854122220393,
"count": 64224,
"is_parallel": true,
"self": 1250.1854122220393
},
"steps_from_proto": {
"total": 101.49301545496019,
"count": 64224,
"is_parallel": true,
"self": 20.34488065082951,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.14813480413068,
"count": 513792,
"is_parallel": true,
"self": 81.14813480413068
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 638.4063420449838,
"count": 64225,
"self": 2.895981084860068,
"children": {
"process_trajectory": {
"total": 109.84948761712621,
"count": 64225,
"self": 109.64794533112581,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2015422860004037,
"count": 2,
"self": 0.2015422860004037
}
}
},
"_update_policy": {
"total": 525.6608733429975,
"count": 453,
"self": 336.18082120103736,
"children": {
"TorchPPOOptimizer.update": {
"total": 189.4800521419602,
"count": 22824,
"self": 189.4800521419602
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2749997040373273e-06,
"count": 1,
"self": 1.2749997040373273e-06
},
"TrainerController._save_models": {
"total": 0.09674639300010313,
"count": 1,
"self": 0.0014335819996631471,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09531281100043998,
"count": 1,
"self": 0.09531281100043998
}
}
}
}
}
}
}