{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3234502077102661,
"min": 0.32301390171051025,
"max": 1.4400469064712524,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9600.001953125,
"min": 9600.001953125,
"max": 43685.26171875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989948.0,
"min": 29952.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989948.0,
"min": 29952.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.551596462726593,
"min": -0.09532348066568375,
"max": 0.5731770992279053,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 154.44700622558594,
"min": -22.877635955810547,
"max": 159.91641235351562,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.05366688221693039,
"min": 0.012212755158543587,
"max": 0.4041997790336609,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 15.026726722717285,
"min": 3.0287632942199707,
"max": 95.79534912109375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06645435084207624,
"min": 0.06543215284562316,
"max": 0.0722261587936856,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9968152626311435,
"min": 0.49824292056902103,
"max": 1.0612249622365697,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016585891865447976,
"min": 0.00038675618254793004,
"max": 0.017439287193285833,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24878837798171966,
"min": 0.00541458655567102,
"max": 0.24878837798171966,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.54567748480667e-06,
"min": 7.54567748480667e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011318516227210005,
"min": 0.00011318516227210005,
"max": 0.0035071598309468,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251519333333334,
"min": 0.10251519333333334,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5377279000000001,
"min": 1.3691136000000002,
"max": 2.5690532,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002612678140000001,
"min": 0.0002612678140000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003919017210000001,
"min": 0.003919017210000001,
"max": 0.11692841468000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01329939253628254,
"min": 0.013048590160906315,
"max": 0.4217536449432373,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19949088990688324,
"min": 0.18268026411533356,
"max": 2.952275514602661,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 329.2967032967033,
"min": 313.6043956043956,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29966.0,
"min": 15984.0,
"max": 32421.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5168153637876878,
"min": -1.0000000521540642,
"max": 1.6424263590476016,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 138.03019810467958,
"min": -32.000001668930054,
"max": 149.46079867333174,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5168153637876878,
"min": -1.0000000521540642,
"max": 1.6424263590476016,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 138.03019810467958,
"min": -32.000001668930054,
"max": 149.46079867333174,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.045219409689537485,
"min": 0.042431707330211836,
"max": 8.877920700237155,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.114966281747911,
"min": 3.861285367049277,
"max": 142.04673120379448,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1714069439",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1714071572"
},
"total": 2132.608589376,
"count": 1,
"self": 0.4769928420000724,
"children": {
"run_training.setup": {
"total": 0.048791671000117276,
"count": 1,
"self": 0.048791671000117276
},
"TrainerController.start_learning": {
"total": 2132.082804863,
"count": 1,
"self": 1.28648098296253,
"children": {
"TrainerController._reset_env": {
"total": 2.0972978690001582,
"count": 1,
"self": 2.0972978690001582
},
"TrainerController.advance": {
"total": 2128.6152456560376,
"count": 63857,
"self": 1.312738692146013,
"children": {
"env_step": {
"total": 1519.1494084269184,
"count": 63857,
"self": 1394.6666045448844,
"children": {
"SubprocessEnvManager._take_step": {
"total": 123.69512062697504,
"count": 63857,
"self": 4.358122484072055,
"children": {
"TorchPolicy.evaluate": {
"total": 119.33699814290298,
"count": 62569,
"self": 119.33699814290298
}
}
},
"workers": {
"total": 0.787683255058937,
"count": 63857,
"self": 0.0,
"children": {
"worker_root": {
"total": 2126.9572456410447,
"count": 63857,
"is_parallel": true,
"self": 847.7118789470296,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002121635999856153,
"count": 1,
"is_parallel": true,
"self": 0.000606754000273213,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00151488199958294,
"count": 8,
"is_parallel": true,
"self": 0.00151488199958294
}
}
},
"UnityEnvironment.step": {
"total": 0.0478811229997973,
"count": 1,
"is_parallel": true,
"self": 0.0006120299997292022,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005112320000080217,
"count": 1,
"is_parallel": true,
"self": 0.0005112320000080217
},
"communicator.exchange": {
"total": 0.04515043600008539,
"count": 1,
"is_parallel": true,
"self": 0.04515043600008539
},
"steps_from_proto": {
"total": 0.0016074249999746826,
"count": 1,
"is_parallel": true,
"self": 0.00033410800028832455,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001273316999686358,
"count": 8,
"is_parallel": true,
"self": 0.001273316999686358
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1279.245366694015,
"count": 63856,
"is_parallel": true,
"self": 33.284108818991854,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.511942109969596,
"count": 63856,
"is_parallel": true,
"self": 22.511942109969596
},
"communicator.exchange": {
"total": 1127.5366388020002,
"count": 63856,
"is_parallel": true,
"self": 1127.5366388020002
},
"steps_from_proto": {
"total": 95.9126769630534,
"count": 63856,
"is_parallel": true,
"self": 19.08276625528356,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.82991070776984,
"count": 510848,
"is_parallel": true,
"self": 76.82991070776984
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 608.153098536973,
"count": 63857,
"self": 2.4908433079874612,
"children": {
"process_trajectory": {
"total": 122.89451545797965,
"count": 63857,
"self": 122.70947350697952,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1850419510001302,
"count": 2,
"self": 0.1850419510001302
}
}
},
"_update_policy": {
"total": 482.76773977100584,
"count": 445,
"self": 283.9731238930158,
"children": {
"TorchPPOOptimizer.update": {
"total": 198.79461587799005,
"count": 22785,
"self": 198.79461587799005
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.150000212481245e-07,
"count": 1,
"self": 8.150000212481245e-07
},
"TrainerController._save_models": {
"total": 0.08377953999979582,
"count": 1,
"self": 0.0012994310000067344,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08248010899978908,
"count": 1,
"self": 0.08248010899978908
}
}
}
}
}
}
}