{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.6539306640625, "min": 0.6028232574462891, "max": 1.4537408351898193, "count": 16 }, "Pyramids.Policy.Entropy.sum": { "value": 19712.0859375, "min": 18229.375, "max": 44100.68359375, "count": 16 }, "Pyramids.Step.mean": { "value": 479912.0, "min": 29952.0, "max": 479912.0, "count": 16 }, "Pyramids.Step.sum": { "value": 479912.0, "min": 29952.0, "max": 479912.0, "count": 16 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": -0.04190780222415924, "min": -0.12364022433757782, "max": 0.2433559149503708, "count": 16 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": -10.225503921508789, "min": -29.673654556274414, "max": 57.675350189208984, "count": 16 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.04117130860686302, "min": 0.038385529071092606, "max": 0.6702629923820496, "count": 16 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 10.045799255371094, "min": 9.327683448791504, "max": 158.85232543945312, "count": 16 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06624294699970726, "min": 0.06453053994174056, "max": 0.07277360419767809, "count": 16 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9274012579959016, "min": 0.49960591451578706, "max": 0.9918409633646569, "count": 16 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.003243076560211091, "min": 0.0002664673293188999, "max": 0.011737040361915776, "count": 16 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.04540307184295528, "min": 0.0034640752811456984, "max": 0.08215928253341043, "count": 16 }, "Pyramids.Policy.LearningRate.mean": { "value": 2.124982148818571e-05, "min": 2.124982148818571e-05, "max": 0.00029030126037577137, "count": 16 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00029749750083459995, "min": 0.00029749750083459995, "max": 0.0025548577483808, "count": 16 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10708324285714285, "min": 0.10708324285714285, "max": 0.19676708571428575, "count": 16 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4991653999999999, "min": 1.3773696000000002, "max": 2.0926288, "count": 16 }, "Pyramids.Policy.Beta.mean": { "value": 0.0007176159614285712, "min": 0.0007176159614285712, "max": 0.00967703186285714, "count": 16 }, "Pyramids.Policy.Beta.sum": { "value": 0.010046623459999997, "min": 0.010046623459999997, "max": 0.08517675808000001, "count": 16 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.0317668542265892, "min": 0.03135249391198158, "max": 0.603436291217804, "count": 16 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.44473594427108765, "min": 0.4319664239883423, "max": 4.224053859710693, "count": 16 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 877.2121212121212, "min": 877.2121212121212, "max": 999.0, "count": 16 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 28948.0, "min": 15984.0, "max": 32307.0, "count": 16 }, "Pyramids.Environment.CumulativeReward.mean": { "value": -0.3870118045631577, "min": -1.0000000521540642, "max": -0.3870118045631577, "count": 16 }, "Pyramids.Environment.CumulativeReward.sum": { "value": -13.158401355147362, "min": -31.999601677060127, "max": -13.158401355147362, "count": 16 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": -0.3870118045631577, "min": -1.0000000521540642, "max": -0.3870118045631577, "count": 16 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": -13.158401355147362, "min": -31.999601677060127, "max": -13.158401355147362, "count": 16 }, "Pyramids.Policy.RndReward.mean": { "value": 0.2854456022614613, 
"min": 0.2854456022614613, "max": 12.268156342208385, "count": 16 }, "Pyramids.Policy.RndReward.sum": { "value": 9.705150476889685, "min": 9.705150476889685, "max": 196.29050147533417, "count": 16 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 16 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 16 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1716116429", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.2.1+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1716118048" }, "total": 1618.8223231379998, "count": 1, "self": 0.9643250449998959, "children": { "run_training.setup": { "total": 0.10280549100002645, "count": 1, "self": 0.10280549100002645 }, "TrainerController.start_learning": { "total": 1617.7551926019999, "count": 1, "self": 1.0706937680074589, "children": { "TrainerController._reset_env": { "total": 3.628296965000118, "count": 1, "self": 3.628296965000118 }, "TrainerController.advance": { "total": 1612.9099522949925, "count": 31442, "self": 1.2573563220983033, "children": { "env_step": { "total": 1026.0784332809694, "count": 31442, "self": 938.2381904670522, "children": { "SubprocessEnvManager._take_step": { "total": 87.09093237597904, "count": 31442, "self": 3.6509065740033293, "children": { "TorchPolicy.evaluate": { "total": 83.44002580197571, "count": 31320, "self": 83.44002580197571 } } }, "workers": { "total": 0.7493104379382203, "count": 31442, "self": 0.0, "children": { "worker_root": { "total": 1614.3455371089901, "count": 31442, "is_parallel": true, "self": 772.3103171410175, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.004371352000134721, "count": 1, "is_parallel": true, "self": 0.001008215000410928, "children": { "_process_rank_one_or_two_observation": { "total": 0.0033631369997237925, "count": 8, "is_parallel": true, "self": 0.0033631369997237925 } } }, "UnityEnvironment.step": { "total": 0.07054719300003853, "count": 1, "is_parallel": true, "self": 0.000769237999975303, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0004925719999846478, "count": 1, "is_parallel": true, "self": 0.0004925719999846478 }, "communicator.exchange": { "total": 0.06708438300006492, "count": 1, "is_parallel": true, "self": 0.06708438300006492 }, "steps_from_proto": { "total": 0.0022010000000136642, "count": 1, "is_parallel": true, "self": 0.00047565399995619373, "children": { "_process_rank_one_or_two_observation": { "total": 0.0017253460000574705, "count": 8, "is_parallel": true, "self": 0.0017253460000574705 } } } } } } }, "UnityEnvironment.step": { "total": 842.0352199679726, "count": 31441, "is_parallel": true, "self": 25.589559415981967, "children": { "UnityEnvironment._generate_step_input": { "total": 14.506342763025486, "count": 31441, "is_parallel": true, "self": 14.506342763025486 }, "communicator.exchange": { "total": 734.6547031799785, "count": 31441, "is_parallel": true, "self": 734.6547031799785 }, "steps_from_proto": { "total": 67.28461460898666, "count": 31441, "is_parallel": true, "self": 14.59154702193132, "children": { 
"_process_rank_one_or_two_observation": { "total": 52.69306758705534, "count": 251528, "is_parallel": true, "self": 52.69306758705534 } } } } } } } } } } }, "trainer_advance": { "total": 585.5741626919248, "count": 31442, "self": 1.8947418949110215, "children": { "process_trajectory": { "total": 86.92649685301512, "count": 31442, "self": 86.74257012201542, "children": { "RLTrainer._checkpoint": { "total": 0.18392673099970125, "count": 1, "self": 0.18392673099970125 } } }, "_update_policy": { "total": 496.75292394399867, "count": 198, "self": 199.2084763540238, "children": { "TorchPPOOptimizer.update": { "total": 297.5444475899749, "count": 11499, "self": 297.5444475899749 } } } } } } }, "trainer_threads": { "total": 1.3969997780804988e-06, "count": 1, "self": 1.3969997780804988e-06 }, "TrainerController._save_models": { "total": 0.1462481769999613, "count": 1, "self": 0.005781884000043647, "children": { "RLTrainer._checkpoint": { "total": 0.14046629299991764, "count": 1, "self": 0.14046629299991764 } } } } } } }