{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3987165689468384,
"min": 0.3861919343471527,
"max": 1.3533161878585815,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11923.220703125,
"min": 11530.146484375,
"max": 41054.19921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.41220057010650635,
"min": -0.09565431624650955,
"max": 0.4572250545024872,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 109.64535522460938,
"min": -23.052690505981445,
"max": 120.82069396972656,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.04009908810257912,
"min": -0.04009908810257912,
"max": 0.5330755710601807,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -10.666357040405273,
"min": -10.666357040405273,
"max": 126.33890533447266,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06777882515286494,
"min": 0.06493677061683098,
"max": 0.07318694618851143,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9489035521401092,
"min": 0.51230862331958,
"max": 1.0628151346269683,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015734387915429016,
"min": 0.0008547672344668508,
"max": 0.016256147493020186,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2202814308160062,
"min": 0.009402439579135358,
"max": 0.22758606490228261,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.307668992714284e-06,
"min": 7.307668992714284e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010230736589799998,
"min": 0.00010230736589799998,
"max": 0.0032549543150152994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243585714285715,
"min": 0.10243585714285715,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.434102,
"min": 1.3886848,
"max": 2.4435260000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002533421285714285,
"min": 0.0002533421285714285,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003546789799999999,
"min": 0.003546789799999999,
"max": 0.10851997153000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010972435586154461,
"min": 0.010972435586154461,
"max": 0.4800242781639099,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1536141037940979,
"min": 0.1536141037940979,
"max": 3.3601698875427246,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 440.7647058823529,
"min": 407.42028985507244,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29972.0,
"min": 15984.0,
"max": 33045.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4121470437111223,
"min": -1.0000000521540642,
"max": 1.5152221980194251,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 96.02599897235632,
"min": -29.998801574110985,
"max": 109.0959982573986,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4121470437111223,
"min": -1.0000000521540642,
"max": 1.5152221980194251,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 96.02599897235632,
"min": -29.998801574110985,
"max": 109.0959982573986,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04895119851383189,
"min": 0.04867074583940532,
"max": 9.384271539747715,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.3286814989405684,
"min": 3.3286814989405684,
"max": 150.14834463596344,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692516080",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692518277"
},
"total": 2196.401969502,
"count": 1,
"self": 0.43066244599958736,
"children": {
"run_training.setup": {
"total": 0.04695926900001268,
"count": 1,
"self": 0.04695926900001268
},
"TrainerController.start_learning": {
"total": 2195.9243477870004,
"count": 1,
"self": 1.4222283420044732,
"children": {
"TrainerController._reset_env": {
"total": 5.070599105999918,
"count": 1,
"self": 5.070599105999918
},
"TrainerController.advance": {
"total": 2189.334574077996,
"count": 63625,
"self": 1.4117496020298859,
"children": {
"env_step": {
"total": 1536.9303757809505,
"count": 63625,
"self": 1426.4083871198518,
"children": {
"SubprocessEnvManager._take_step": {
"total": 109.67021155506427,
"count": 63625,
"self": 4.750360848044352,
"children": {
"TorchPolicy.evaluate": {
"total": 104.91985070701992,
"count": 62560,
"self": 104.91985070701992
}
}
},
"workers": {
"total": 0.8517771060343193,
"count": 63625,
"self": 0.0,
"children": {
"worker_root": {
"total": 2191.0017423540503,
"count": 63625,
"is_parallel": true,
"self": 879.4796832140314,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018444549999685478,
"count": 1,
"is_parallel": true,
"self": 0.0005373979997784772,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013070570001900705,
"count": 8,
"is_parallel": true,
"self": 0.0013070570001900705
}
}
},
"UnityEnvironment.step": {
"total": 0.0814473209999278,
"count": 1,
"is_parallel": true,
"self": 0.0005975269999680677,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047899200001211284,
"count": 1,
"is_parallel": true,
"self": 0.00047899200001211284
},
"communicator.exchange": {
"total": 0.07843964199992115,
"count": 1,
"is_parallel": true,
"self": 0.07843964199992115
},
"steps_from_proto": {
"total": 0.0019311600000264662,
"count": 1,
"is_parallel": true,
"self": 0.0003699930000493623,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015611669999771038,
"count": 8,
"is_parallel": true,
"self": 0.0015611669999771038
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1311.5220591400189,
"count": 63624,
"is_parallel": true,
"self": 33.91325898202581,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.924536279037852,
"count": 63624,
"is_parallel": true,
"self": 22.924536279037852
},
"communicator.exchange": {
"total": 1149.6489971579458,
"count": 63624,
"is_parallel": true,
"self": 1149.6489971579458
},
"steps_from_proto": {
"total": 105.0352667210093,
"count": 63624,
"is_parallel": true,
"self": 20.589172361947817,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.44609435906148,
"count": 508992,
"is_parallel": true,
"self": 84.44609435906148
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 650.9924486950158,
"count": 63625,
"self": 2.7003356991123155,
"children": {
"process_trajectory": {
"total": 109.49986051690928,
"count": 63625,
"self": 109.29078157690913,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20907894000015403,
"count": 2,
"self": 0.20907894000015403
}
}
},
"_update_policy": {
"total": 538.7922524789942,
"count": 445,
"self": 350.6468720720185,
"children": {
"TorchPPOOptimizer.update": {
"total": 188.14538040697573,
"count": 22827,
"self": 188.14538040697573
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0000003385357559e-06,
"count": 1,
"self": 1.0000003385357559e-06
},
"TrainerController._save_models": {
"total": 0.0969452609997461,
"count": 1,
"self": 0.0014422259996536013,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0955030350000925,
"count": 1,
"self": 0.0955030350000925
}
}
}
}
}
}
}