{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.6478807330131531, "min": 0.6263801455497742, "max": 1.5643631219863892, "count": 55 }, "Pyramids.Policy.Entropy.sum": { "value": 13030.1767578125, "min": 12637.845703125, "max": 31437.44140625, "count": 55 }, "Pyramids.Step.mean": { "value": 1099905.0, "min": 19968.0, "max": 1099905.0, "count": 55 }, "Pyramids.Step.sum": { "value": 1099905.0, "min": 19968.0, "max": 1099905.0, "count": 55 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.18532074987888336, "min": -0.05848095193505287, "max": 0.18532074987888336, "count": 55 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 32.245811462402344, "min": -9.47391414642334, "max": 32.245811462402344, "count": 55 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.289790540933609, "min": -0.13738633692264557, "max": 0.6092210412025452, "count": 55 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 50.423553466796875, "min": -23.630449295043945, "max": 104.17679595947266, "count": 55 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.07092678431466665, "min": 0.06203579677265258, "max": 0.07425731797519942, "count": 55 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.6383410588319998, "min": 0.2786206022105996, "max": 0.7294507481985623, "count": 55 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.013268985237120594, "min": 5.355411207126534e-05, "max": 0.04645436719389788, "count": 55 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.11942086713408535, "min": 0.00042843289657012274, "max": 0.4645436719389788, "count": 55 }, "Pyramids.Policy.LearningRate.mean": { "value": 2.7241900010606083e-06, "min": 2.7241900010606083e-06, "max": 0.00029723345546763636, "count": 55 }, "Pyramids.Policy.LearningRate.sum": { "value": 2.4517710009545473e-05, "min": 2.4517710009545473e-05, "max": 0.0020462021361145454, "count": 55 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.1009080303030303, "min": 0.1009080303030303, "max": 0.1990778181818182, "count": 55 }, "Pyramids.Policy.Epsilon.sum": { "value": 0.9081722727272727, "min": 0.7963112727272728, "max": 1.6820672727272725, "count": 55 }, "Pyramids.Policy.Beta.mean": { "value": 0.00010071222727272735, "min": 0.00010071222727272735, "max": 0.009907874036363637, "count": 55 }, "Pyramids.Policy.Beta.sum": { "value": 0.0009064100454545461, "min": 0.0009064100454545461, "max": 0.06823852054545455, "count": 55 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.0053162043914198875, "min": 0.0053162043914198875, "max": 0.34465861320495605, "count": 55 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.04784584045410156, "min": 0.04784584045410156, "max": 1.3786344528198242, "count": 55 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 524.25, "min": 524.0, "max": 999.0, "count": 55 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 20970.0, "min": 15451.0, "max": 30969.0, "count": 55 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.0756549702957272, "min": -1.0000000521540642, "max": 1.220999972838344, "count": 55 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 43.02619881182909, "min": -30.999601617455482, "max": 44.609999269247055, "count": 55 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.0756549702957272, "min": -1.0000000521540642, "max": 1.220999972838344, "count": 55 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 43.02619881182909, "min": -30.999601617455482, "max": 44.609999269247055, "count": 55 }, "Pyramids.Policy.RndReward.mean": { "value": 0.028838241543780896, 
"min": 0.028838241543780896, "max": 5.358188681304455, "count": 55 }, "Pyramids.Policy.RndReward.sum": { "value": 1.1535296617512358, "min": 1.0716585788759403, "max": 85.73101890087128, "count": 55 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 55 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 55 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1694189752", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1694192128" }, "total": 2376.032900941, "count": 1, "self": 1.03996320700071, "children": { "run_training.setup": { "total": 0.03963193399999909, "count": 1, "self": 0.03963193399999909 }, "TrainerController.start_learning": { "total": 2374.9533057999997, "count": 1, "self": 1.5645213870006955, "children": { "TrainerController._reset_env": { "total": 4.018152553000164, "count": 1, "self": 4.018152553000164 }, "TrainerController.advance": { "total": 2369.224561984999, "count": 69683, "self": 1.6067396940488834, "children": { "env_step": { "total": 1626.2533884279223, "count": 69683, "self": 1498.5316635419156, "children": { "SubprocessEnvManager._take_step": { "total": 126.75823395596422, "count": 69683, "self": 5.561636042016062, "children": { "TorchPolicy.evaluate": { "total": 121.19659791394815, "count": 68829, "self": 121.19659791394815 } } }, "workers": { "total": 0.9634909300425534, "count": 69683, "self": 0.0, "children": { "worker_root": { "total": 2369.482287998019, "count": 69683, "is_parallel": true, "self": 1002.8703880470307, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0018298659999800293, "count": 1, "is_parallel": true, "self": 0.0005603300000984746, "children": { "_process_rank_one_or_two_observation": { "total": 0.0012695359998815547, "count": 8, "is_parallel": true, "self": 0.0012695359998815547 } } }, "UnityEnvironment.step": { "total": 0.05850562300020101, "count": 1, "is_parallel": true, "self": 0.0006194860002324276, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0006058349999875645, "count": 1, "is_parallel": true, "self": 0.0006058349999875645 }, "communicator.exchange": { "total": 0.05505721600002289, "count": 1, "is_parallel": true, "self": 0.05505721600002289 }, "steps_from_proto": { "total": 0.00222308599995813, "count": 1, "is_parallel": true, "self": 0.0004818970005544543, "children": { "_process_rank_one_or_two_observation": { "total": 0.0017411889994036756, "count": 8, "is_parallel": true, "self": 0.0017411889994036756 } } } } } } }, "UnityEnvironment.step": { "total": 1366.6118999509881, "count": 69682, "is_parallel": true, "self": 38.3468355839957, "children": { "UnityEnvironment._generate_step_input": { "total": 27.467677006069152, "count": 69682, "is_parallel": true, "self": 27.467677006069152 }, "communicator.exchange": { "total": 1175.2653880369837, "count": 69682, "is_parallel": true, "self": 1175.2653880369837 }, "steps_from_proto": { "total": 125.53199932393954, "count": 69682, "is_parallel": true, "self": 24.651163354813434, 
"children": { "_process_rank_one_or_two_observation": { "total": 100.88083596912611, "count": 557456, "is_parallel": true, "self": 100.88083596912611 } } } } } } } } } } }, "trainer_advance": { "total": 741.3644338630279, "count": 69683, "self": 2.804215569023654, "children": { "process_trajectory": { "total": 131.1838605540031, "count": 69683, "self": 130.9957377060034, "children": { "RLTrainer._checkpoint": { "total": 0.18812284799969348, "count": 2, "self": 0.18812284799969348 } } }, "_update_policy": { "total": 607.3763577400011, "count": 480, "self": 394.8712574319918, "children": { "TorchPPOOptimizer.update": { "total": 212.50510030800933, "count": 25128, "self": 212.50510030800933 } } } } } } }, "trainer_threads": { "total": 1.3919998309575021e-06, "count": 1, "self": 1.3919998309575021e-06 }, "TrainerController._save_models": { "total": 0.14606848299990816, "count": 1, "self": 0.0014184989995555952, "children": { "RLTrainer._checkpoint": { "total": 0.14464998400035256, "count": 1, "self": 0.14464998400035256 } } } } } } }