ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4615787267684937,
"min": 1.4615787267684937,
"max": 1.4631232023239136,
"count": 21
},
"Huggy.Policy.Entropy.sum": {
"value": 74242.3515625,
"min": 1629.9193115234375,
"max": 74497.140625,
"count": 21
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.89409722222223,
"min": 38.125,
"max": 85.89409722222223,
"count": 21
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49475.0,
"min": 610.0,
"max": 49955.0,
"count": 21
},
"Huggy.Step.mean": {
"value": 4999998.0,
"min": 3999975.0,
"max": 4999998.0,
"count": 21
},
"Huggy.Step.sum": {
"value": 4999998.0,
"min": 3999975.0,
"max": 4999998.0,
"count": 21
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.529625415802002,
"min": 2.0727760791778564,
"max": 2.5629072189331055,
"count": 21
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1457.064208984375,
"min": 31.091642379760742,
"max": 1960.6240234375,
"count": 21
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9443130030607185,
"min": 2.8365768909454347,
"max": 4.030576722112218,
"count": 21
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2271.924289762974,
"min": 42.54865336418152,
"max": 3056.473274052143,
"count": 21
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9443130030607185,
"min": 2.8365768909454347,
"max": 4.030576722112218,
"count": 21
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2271.924289762974,
"min": 42.54865336418152,
"max": 3056.473274052143,
"count": 21
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 21
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 21
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01645280432494474,
"min": 0.012763792780606308,
"max": 0.01964172369625885,
"count": 20
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03290560864988948,
"min": 0.025527585561212617,
"max": 0.05818501380088037,
"count": 20
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0511211009696126,
"min": 0.050411403831094506,
"max": 0.06767749013379216,
"count": 20
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1022422019392252,
"min": 0.10082280766218901,
"max": 0.18741850070655347,
"count": 20
},
"Huggy.Policy.LearningRate.mean": {
"value": 1.3346195551600054e-06,
"min": 1.3346195551600054e-06,
"max": 5.818682060442e-05,
"count": 20
},
"Huggy.Policy.LearningRate.sum": {
"value": 2.6692391103200107e-06,
"min": 2.6692391103200107e-06,
"max": 0.00015793068735651996,
"count": 20
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10000000000000002,
"min": 0.10000000000000002,
"max": 0.10000000000000002,
"count": 20
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20000000000000004,
"min": 0.20000000000000004,
"max": 0.30000000000000004,
"count": 20
},
"Huggy.Policy.Beta.mean": {
"value": 3.21975160000001e-05,
"min": 3.21975160000001e-05,
"max": 0.0009778394420000003,
"count": 20
},
"Huggy.Policy.Beta.sum": {
"value": 6.43950320000002e-05,
"min": 6.43950320000002e-05,
"max": 0.002656909652,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687614204",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687615397"
},
"total": 1192.3650826270004,
"count": 1,
"self": 0.7867250870003772,
"children": {
"run_training.setup": {
"total": 0.037453466999977536,
"count": 1,
"self": 0.037453466999977536
},
"TrainerController.start_learning": {
"total": 1191.540904073,
"count": 1,
"self": 2.2832590788630114,
"children": {
"TrainerController._reset_env": {
"total": 5.247246918000201,
"count": 1,
"self": 5.247246918000201
},
"TrainerController.advance": {
"total": 1183.8228053371377,
"count": 118227,
"self": 2.377192093001213,
"children": {
"env_step": {
"total": 969.1501620849649,
"count": 118227,
"self": 820.3189952230896,
"children": {
"SubprocessEnvManager._take_step": {
"total": 147.4172532687835,
"count": 118227,
"self": 8.327659440872594,
"children": {
"TorchPolicy.evaluate": {
"total": 139.0895938279109,
"count": 111786,
"self": 139.0895938279109
}
}
},
"workers": {
"total": 1.41391359309182,
"count": 118227,
"self": 0.0,
"children": {
"worker_root": {
"total": 1187.3567987089855,
"count": 118227,
"is_parallel": true,
"self": 512.0929151738137,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007698909994360292,
"count": 1,
"is_parallel": true,
"self": 0.00021642999945470365,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005534609999813256,
"count": 2,
"is_parallel": true,
"self": 0.0005534609999813256
}
}
},
"UnityEnvironment.step": {
"total": 0.07165625600009662,
"count": 1,
"is_parallel": true,
"self": 0.00032951399953162763,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022036300015315646,
"count": 1,
"is_parallel": true,
"self": 0.00022036300015315646
},
"communicator.exchange": {
"total": 0.07035627200002637,
"count": 1,
"is_parallel": true,
"self": 0.07035627200002637
},
"steps_from_proto": {
"total": 0.0007501070003854693,
"count": 1,
"is_parallel": true,
"self": 0.00022152299970912281,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005285840006763465,
"count": 2,
"is_parallel": true,
"self": 0.0005285840006763465
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 675.2638835351718,
"count": 118226,
"is_parallel": true,
"self": 19.8972707592402,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 40.74938854701486,
"count": 118226,
"is_parallel": true,
"self": 40.74938854701486
},
"communicator.exchange": {
"total": 565.1960905508777,
"count": 118226,
"is_parallel": true,
"self": 565.1960905508777
},
"steps_from_proto": {
"total": 49.42113367803904,
"count": 118226,
"is_parallel": true,
"self": 17.638342123775146,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.782791554263895,
"count": 236452,
"is_parallel": true,
"self": 31.782791554263895
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 212.29545115917153,
"count": 118227,
"self": 3.3879345521399955,
"children": {
"process_trajectory": {
"total": 77.31324694803061,
"count": 118227,
"self": 76.35542192603043,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9578250220001792,
"count": 6,
"self": 0.9578250220001792
}
}
},
"_update_policy": {
"total": 131.59426965900093,
"count": 48,
"self": 111.28623789800713,
"children": {
"TorchPPOOptimizer.update": {
"total": 20.3080317609938,
"count": 960,
"self": 20.3080317609938
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4039997040526941e-06,
"count": 1,
"self": 1.4039997040526941e-06
},
"TrainerController._save_models": {
"total": 0.1875913349995244,
"count": 1,
"self": 0.002902106999499665,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18468922800002474,
"count": 1,
"self": 0.18468922800002474
}
}
}
}
}
}
}