{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5353277325630188,
"min": 0.5353277325630188,
"max": 1.475871205329895,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16034.13671875,
"min": 16034.13671875,
"max": 44772.02734375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989916.0,
"min": 29882.0,
"max": 989916.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989916.0,
"min": 29882.0,
"max": 989916.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3012092709541321,
"min": -0.12279359251260757,
"max": 0.30525824427604675,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 77.10957336425781,
"min": -29.5932559967041,
"max": 80.58817291259766,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.029363010078668594,
"min": 0.0012516822898760438,
"max": 0.3825514316558838,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.51693058013916,
"min": 0.3179273009300232,
"max": 90.66468811035156,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06926548547274172,
"min": 0.0643377494787946,
"max": 0.07337055569570068,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9697167966183841,
"min": 0.5036782812883799,
"max": 1.069606152974302,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011229231258318581,
"min": 0.00038680095713888326,
"max": 0.012697830827609221,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.15720923761646013,
"min": 0.004254810528527716,
"max": 0.1904674624141383,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.302826137185713e-06,
"min": 7.302826137185713e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010223956592059998,
"min": 0.00010223956592059998,
"max": 0.0035082443305853,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243424285714287,
"min": 0.10243424285714287,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4340794000000001,
"min": 1.3886848,
"max": 2.5694147000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002531808614285714,
"min": 0.0002531808614285714,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035445320599999997,
"min": 0.0035445320599999997,
"max": 0.11696452853000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011185184121131897,
"min": 0.011185184121131897,
"max": 0.4553283154964447,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15659257769584656,
"min": 0.15659257769584656,
"max": 3.18729829788208,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 530.4615384615385,
"min": 523.5666666666667,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27584.0,
"min": 16809.0,
"max": 32391.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2387038142635272,
"min": -0.9999742455059483,
"max": 1.2430399637669325,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 64.41259834170341,
"min": -30.999201610684395,
"max": 74.58239782601595,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2387038142635272,
"min": -0.9999742455059483,
"max": 1.2430399637669325,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 64.41259834170341,
"min": -30.999201610684395,
"max": 74.58239782601595,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.061457484152365044,
"min": 0.061457484152365044,
"max": 9.137401564156308,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1957891759229824,
"min": 3.1957891759229824,
"max": 155.33582659065723,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710363181",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1710364996"
},
"total": 1814.4316346009996,
"count": 1,
"self": 0.37728766199961683,
"children": {
"run_training.setup": {
"total": 0.052258174999678886,
"count": 1,
"self": 0.052258174999678886
},
"TrainerController.start_learning": {
"total": 1814.0020887640003,
"count": 1,
"self": 1.606278955968719,
"children": {
"TrainerController._reset_env": {
"total": 2.1585847960000137,
"count": 1,
"self": 2.1585847960000137
},
"TrainerController.advance": {
"total": 1810.148427440031,
"count": 63359,
"self": 1.5901242840727718,
"children": {
"env_step": {
"total": 1204.4690544179293,
"count": 63359,
"self": 1061.9646168667855,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.4925858690308,
"count": 63359,
"self": 5.135585090047243,
"children": {
"TorchPolicy.evaluate": {
"total": 136.35700077898355,
"count": 62553,
"self": 136.35700077898355
}
}
},
"workers": {
"total": 1.011851682113047,
"count": 63359,
"self": 0.0,
"children": {
"worker_root": {
"total": 1811.4356805799166,
"count": 63359,
"is_parallel": true,
"self": 867.957377457973,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023316440001508454,
"count": 1,
"is_parallel": true,
"self": 0.0006746229996679176,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016570210004829278,
"count": 8,
"is_parallel": true,
"self": 0.0016570210004829278
}
}
},
"UnityEnvironment.step": {
"total": 0.040314954000223224,
"count": 1,
"is_parallel": true,
"self": 0.00044979200038142153,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00032484599978488404,
"count": 1,
"is_parallel": true,
"self": 0.00032484599978488404
},
"communicator.exchange": {
"total": 0.03839946400012195,
"count": 1,
"is_parallel": true,
"self": 0.03839946400012195
},
"steps_from_proto": {
"total": 0.0011408519999349664,
"count": 1,
"is_parallel": true,
"self": 0.0002619639999466017,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008788879999883648,
"count": 8,
"is_parallel": true,
"self": 0.0008788879999883648
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 943.4783031219436,
"count": 63358,
"is_parallel": true,
"self": 26.041625976035448,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 17.322087656973054,
"count": 63358,
"is_parallel": true,
"self": 17.322087656973054
},
"communicator.exchange": {
"total": 825.1761111309843,
"count": 63358,
"is_parallel": true,
"self": 825.1761111309843
},
"steps_from_proto": {
"total": 74.93847835795077,
"count": 63358,
"is_parallel": true,
"self": 16.50608037939628,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.43239797855449,
"count": 506864,
"is_parallel": true,
"self": 58.43239797855449
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 604.0892487380288,
"count": 63359,
"self": 3.1215421109855015,
"children": {
"process_trajectory": {
"total": 121.86970277403998,
"count": 63359,
"self": 121.66743374703947,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20226902700051141,
"count": 2,
"self": 0.20226902700051141
}
}
},
"_update_policy": {
"total": 479.0980038530033,
"count": 449,
"self": 278.72414770100204,
"children": {
"TorchPPOOptimizer.update": {
"total": 200.37385615200128,
"count": 22764,
"self": 200.37385615200128
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.980003596865572e-07,
"count": 1,
"self": 9.980003596865572e-07
},
"TrainerController._save_models": {
"total": 0.08879657400029828,
"count": 1,
"self": 0.001519106000159809,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08727746800013847,
"count": 1,
"self": 0.08727746800013847
}
}
}
}
}
}
}