{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.32846593856811523,
"min": 0.32846593856811523,
"max": 1.455418586730957,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9875.0,
"min": 9875.0,
"max": 44151.578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989993.0,
"min": 29952.0,
"max": 989993.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989993.0,
"min": 29952.0,
"max": 989993.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.570151686668396,
"min": -0.09297454357147217,
"max": 0.570151686668396,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 161.92308044433594,
"min": -22.406864166259766,
"max": 161.92308044433594,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.011242495849728584,
"min": -0.02884085662662983,
"max": 0.2717488706111908,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.192868709564209,
"min": -7.094850540161133,
"max": 65.4914779663086,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07088984576763498,
"min": 0.06528661026955576,
"max": 0.07451107542562697,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9924578407468896,
"min": 0.5155871145282711,
"max": 1.051984154603298,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015956498018654418,
"min": 0.00022249645558307656,
"max": 0.01681934588616329,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22339097226116184,
"min": 0.002892453922579995,
"max": 0.23547084240628607,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.349304693121432e-06,
"min": 7.349304693121432e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010289026570370005,
"min": 0.00010289026570370005,
"max": 0.0036327511890829997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244973571428573,
"min": 0.10244973571428573,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4342963000000002,
"min": 1.3886848,
"max": 2.6109169999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025472859785714304,
"min": 0.00025472859785714304,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035662003700000023,
"min": 0.0035662003700000023,
"max": 0.12111060829999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012695064768195152,
"min": 0.012695064768195152,
"max": 0.3957328796386719,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17773090302944183,
"min": 0.17773090302944183,
"max": 2.770130157470703,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 310.49484536082474,
"min": 310.49484536082474,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30118.0,
"min": 15984.0,
"max": 34842.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6895051365046156,
"min": -1.0000000521540642,
"max": 1.6895051365046156,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 163.88199824094772,
"min": -29.997201591730118,
"max": 163.88199824094772,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6895051365046156,
"min": -1.0000000521540642,
"max": 1.6895051365046156,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 163.88199824094772,
"min": -29.997201591730118,
"max": 163.88199824094772,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04097032838196646,
"min": 0.04097032838196646,
"max": 7.656364715658128,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.9741218530507467,
"min": 3.966332409516326,
"max": 122.50183545053005,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696216280",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1696218596"
},
"total": 2316.4659534889997,
"count": 1,
"self": 0.8548761499996544,
"children": {
"run_training.setup": {
"total": 0.06506582799988792,
"count": 1,
"self": 0.06506582799988792
},
"TrainerController.start_learning": {
"total": 2315.546011511,
"count": 1,
"self": 1.3845496130511492,
"children": {
"TrainerController._reset_env": {
"total": 5.078607113000089,
"count": 1,
"self": 5.078607113000089
},
"TrainerController.advance": {
"total": 2308.9258635529486,
"count": 63823,
"self": 1.3580597349646268,
"children": {
"env_step": {
"total": 1638.0729142429632,
"count": 63823,
"self": 1528.737767837917,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.51000391605407,
"count": 63823,
"self": 4.665571416114062,
"children": {
"TorchPolicy.evaluate": {
"total": 103.84443249994001,
"count": 62548,
"self": 103.84443249994001
}
}
},
"workers": {
"total": 0.8251424889920145,
"count": 63823,
"self": 0.0,
"children": {
"worker_root": {
"total": 2309.9691839720526,
"count": 63823,
"is_parallel": true,
"self": 895.4351654260875,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0026563599999462895,
"count": 1,
"is_parallel": true,
"self": 0.0007403039999189787,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019160560000273108,
"count": 8,
"is_parallel": true,
"self": 0.0019160560000273108
}
}
},
"UnityEnvironment.step": {
"total": 0.047437200000103985,
"count": 1,
"is_parallel": true,
"self": 0.0005837809999320598,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005060910000338481,
"count": 1,
"is_parallel": true,
"self": 0.0005060910000338481
},
"communicator.exchange": {
"total": 0.04366749800010439,
"count": 1,
"is_parallel": true,
"self": 0.04366749800010439
},
"steps_from_proto": {
"total": 0.0026798300000336894,
"count": 1,
"is_parallel": true,
"self": 0.0003492420000839047,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023305879999497847,
"count": 8,
"is_parallel": true,
"self": 0.0023305879999497847
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1414.5340185459652,
"count": 63822,
"is_parallel": true,
"self": 33.78076866198103,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.049014497969665,
"count": 63822,
"is_parallel": true,
"self": 23.049014497969665
},
"communicator.exchange": {
"total": 1253.9083839499501,
"count": 63822,
"is_parallel": true,
"self": 1253.9083839499501
},
"steps_from_proto": {
"total": 103.79585143606437,
"count": 63822,
"is_parallel": true,
"self": 20.435371406866352,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.36048002919802,
"count": 510576,
"is_parallel": true,
"self": 83.36048002919802
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 669.4948895750208,
"count": 63823,
"self": 2.5577473480600474,
"children": {
"process_trajectory": {
"total": 109.60221011995463,
"count": 63823,
"self": 109.34100844595423,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2612016740004037,
"count": 2,
"self": 0.2612016740004037
}
}
},
"_update_policy": {
"total": 557.3349321070061,
"count": 453,
"self": 366.09167929403475,
"children": {
"TorchPPOOptimizer.update": {
"total": 191.24325281297138,
"count": 22803,
"self": 191.24325281297138
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.635999979043845e-06,
"count": 1,
"self": 1.635999979043845e-06
},
"TrainerController._save_models": {
"total": 0.1569895960001304,
"count": 1,
"self": 0.0018932640000457468,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15509633200008466,
"count": 1,
"self": 0.15509633200008466
}
}
}
}
}
}
}