pyramid_ppo / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3164612948894501,
"min": 0.3079012632369995,
"max": 1.433062195777893,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9539.4091796875,
"min": 9296.1552734375,
"max": 43473.375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989961.0,
"min": 29952.0,
"max": 989961.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989961.0,
"min": 29952.0,
"max": 989961.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6362255811691284,
"min": -0.08559349179267883,
"max": 0.6650585532188416,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 178.77938842773438,
"min": -20.62803077697754,
"max": 190.87181091308594,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.009952552616596222,
"min": -0.01849052682518959,
"max": 0.4459307789802551,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.7966673374176025,
"min": -4.863008499145508,
"max": 105.68559265136719,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06864753445205722,
"min": 0.06397803956044773,
"max": 0.07250913445063652,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9610654823288011,
"min": 0.4794036922222017,
"max": 1.0665216220077127,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01607535640955792,
"min": 0.0005407533054114443,
"max": 0.01885776720952154,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22505498973381088,
"min": 0.004326026443291555,
"max": 0.26400874093330157,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.515290352078572e-06,
"min": 7.515290352078572e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001052140649291,
"min": 0.0001052140649291,
"max": 0.0033826835724388993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250506428571429,
"min": 0.10250506428571429,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350709000000001,
"min": 1.3691136000000002,
"max": 2.5277620000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026025592214285717,
"min": 0.00026025592214285717,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036435829100000005,
"min": 0.0036435829100000005,
"max": 0.11278335389000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013431372120976448,
"min": 0.013431372120976448,
"max": 0.6062003970146179,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18803921341896057,
"min": 0.18803921341896057,
"max": 4.24340295791626,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 308.9130434782609,
"min": 272.64285714285717,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28420.0,
"min": 15984.0,
"max": 32798.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.625856508055459,
"min": -1.0000000521540642,
"max": 1.7044719904661179,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 149.57879874110222,
"min": -32.000001668930054,
"max": 187.46199847012758,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.625856508055459,
"min": -1.0000000521540642,
"max": 1.7044719904661179,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 149.57879874110222,
"min": -32.000001668930054,
"max": 187.46199847012758,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04282578554367596,
"min": 0.038690549169197244,
"max": 12.699021624401212,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.9399722700181883,
"min": 3.9399722700181883,
"max": 203.1843459904194,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1719500438",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/PyramidsRND.yaml --env=/content/training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1719503723"
},
"total": 3285.544901331,
"count": 1,
"self": 0.6961744560003353,
"children": {
"run_training.setup": {
"total": 0.07229135299985501,
"count": 1,
"self": 0.07229135299985501
},
"TrainerController.start_learning": {
"total": 3284.7764355219997,
"count": 1,
"self": 2.231652660009786,
"children": {
"TrainerController._reset_env": {
"total": 2.9313808130000325,
"count": 1,
"self": 2.9313808130000325
},
"TrainerController.advance": {
"total": 3279.5276752599884,
"count": 64140,
"self": 2.516240050958004,
"children": {
"env_step": {
"total": 2237.4579338289977,
"count": 64140,
"self": 2079.640005545929,
"children": {
"SubprocessEnvManager._take_step": {
"total": 156.39211676001082,
"count": 64140,
"self": 6.561820003000548,
"children": {
"TorchPolicy.evaluate": {
"total": 149.83029675701027,
"count": 62560,
"self": 149.83029675701027
}
}
},
"workers": {
"total": 1.4258115230582007,
"count": 64140,
"self": 0.0,
"children": {
"worker_root": {
"total": 3277.3102046259905,
"count": 64140,
"is_parallel": true,
"self": 1379.88545055205,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004558414000030098,
"count": 1,
"is_parallel": true,
"self": 0.001313592000542485,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0032448219994876126,
"count": 8,
"is_parallel": true,
"self": 0.0032448219994876126
}
}
},
"UnityEnvironment.step": {
"total": 0.06632136899997931,
"count": 1,
"is_parallel": true,
"self": 0.0008938649998526671,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005408270001225901,
"count": 1,
"is_parallel": true,
"self": 0.0005408270001225901
},
"communicator.exchange": {
"total": 0.06257284000002983,
"count": 1,
"is_parallel": true,
"self": 0.06257284000002983
},
"steps_from_proto": {
"total": 0.002313836999974228,
"count": 1,
"is_parallel": true,
"self": 0.00041863600017677527,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018952009997974528,
"count": 8,
"is_parallel": true,
"self": 0.0018952009997974528
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1897.4247540739404,
"count": 64139,
"is_parallel": true,
"self": 49.986979416052236,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 30.90677041496565,
"count": 64139,
"is_parallel": true,
"self": 30.90677041496565
},
"communicator.exchange": {
"total": 1684.841212373907,
"count": 64139,
"is_parallel": true,
"self": 1684.841212373907
},
"steps_from_proto": {
"total": 131.6897918690156,
"count": 64139,
"is_parallel": true,
"self": 28.37178584190383,
"children": {
"_process_rank_one_or_two_observation": {
"total": 103.31800602711178,
"count": 513112,
"is_parallel": true,
"self": 103.31800602711178
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1039.5535013800327,
"count": 64140,
"self": 4.251810807149695,
"children": {
"process_trajectory": {
"total": 161.68420717888398,
"count": 64140,
"self": 161.49820693888455,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18600023999943005,
"count": 2,
"self": 0.18600023999943005
}
}
},
"_update_policy": {
"total": 873.617483393999,
"count": 445,
"self": 363.5463774349439,
"children": {
"TorchPPOOptimizer.update": {
"total": 510.0711059590551,
"count": 22848,
"self": 510.0711059590551
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.620007404009812e-07,
"count": 1,
"self": 9.620007404009812e-07
},
"TrainerController._save_models": {
"total": 0.08572582700071507,
"count": 1,
"self": 0.002646373000970925,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08307945399974415,
"count": 1,
"self": 0.08307945399974415
}
}
}
}
}
}
}
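
For reference: in the file above, the "gauges" block holds the per-statistic aggregates that ML-Agents accumulates during training (the latest value plus min, max, and a write count for each metric), while the "total" / "count" / "self" / "children" keys form the hierarchical wall-clock profiler tree for the run. The sketch below loads and summarizes the file; the local path run_logs/timers.json and the helper name walk are assumptions for illustration, but the key layout is taken directly from the JSON itself.

import json

# Assumed local path; adjust to wherever this file is saved.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the most recent value plus min/max/count aggregates.
for name, gauge in sorted(timers["gauges"].items()):
    print(f"{name}: value={gauge['value']:.4g} "
          f"(min={gauge['min']:.4g}, max={gauge['max']:.4g}, n={gauge['count']})")

def walk(node, name="root", depth=0, grand_total=None):
    """Print each timer as seconds and as a share of total wall-clock time."""
    total = node.get("total", 0.0)
    if grand_total is None:
        grand_total = total or 1.0  # guard against a zero root total
    print(f"{'  ' * depth}{name}: {total:.1f}s "
          f"({100 * total / grand_total:.1f}%), calls={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1, grand_total)

walk(timers)  # the timer tree hangs directly off the root object

Walking this particular run shows where the ~3286 s went: communicator.exchange (the Unity-Python round trip) accounts for roughly 1685 s, about half the wall clock, with TorchPPOOptimizer.update (~510 s) the largest training-side cost.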