ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4014153480529785,
"min": 1.4014153480529785,
"max": 1.425142526626587,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70464.5625,
"min": 68280.28125,
"max": 78825.75,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 79.60450160771704,
"min": 72.55571847507332,
"max": 383.19083969465646,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49514.0,
"min": 49048.0,
"max": 50198.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999954.0,
"min": 49809.0,
"max": 1999954.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999954.0,
"min": 49809.0,
"max": 1999954.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.457686185836792,
"min": 0.09975236654281616,
"max": 2.5221385955810547,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1526.22314453125,
"min": 12.96780776977539,
"max": 1627.763671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7786466425551692,
"min": 1.7660236668128233,
"max": 4.05224237580822,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2346.53956502676,
"min": 229.58307668566704,
"max": 2581.3523367643356,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7786466425551692,
"min": 1.7660236668128233,
"max": 4.05224237580822,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2346.53956502676,
"min": 229.58307668566704,
"max": 2581.3523367643356,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015355786049217892,
"min": 0.013287628344793726,
"max": 0.02126010176434647,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04606735814765368,
"min": 0.02657525668958745,
"max": 0.056038522940070834,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05891480942567189,
"min": 0.022040133488674958,
"max": 0.06478350590914488,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17674442827701567,
"min": 0.044080266977349916,
"max": 0.1845672270903985,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.785198738299997e-06,
"min": 3.785198738299997e-06,
"max": 0.00029535285154904994,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1355596214899991e-05,
"min": 1.1355596214899991e-05,
"max": 0.0008443716185427998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1012617,
"min": 0.1012617,
"max": 0.19845094999999996,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3037851,
"min": 0.20765400000000006,
"max": 0.5814572,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.295882999999995e-05,
"min": 7.295882999999995e-05,
"max": 0.004922702405,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021887648999999987,
"min": 0.00021887648999999987,
"max": 0.014074714279999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678270393",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678272846"
},
"total": 2452.930687726,
"count": 1,
"self": 0.44855799400011165,
"children": {
"run_training.setup": {
"total": 0.1169941990000325,
"count": 1,
"self": 0.1169941990000325
},
"TrainerController.start_learning": {
"total": 2452.365135533,
"count": 1,
"self": 4.320131667011992,
"children": {
"TrainerController._reset_env": {
"total": 9.872488541999985,
"count": 1,
"self": 9.872488541999985
},
"TrainerController.advance": {
"total": 2438.0604559189874,
"count": 233000,
"self": 4.637487230934767,
"children": {
"env_step": {
"total": 1891.3570427779748,
"count": 233000,
"self": 1586.9767751369398,
"children": {
"SubprocessEnvManager._take_step": {
"total": 301.55376623407295,
"count": 233000,
"self": 16.27611722898473,
"children": {
"TorchPolicy.evaluate": {
"total": 285.2776490050882,
"count": 223043,
"self": 71.1004356171008,
"children": {
"TorchPolicy.sample_actions": {
"total": 214.17721338798742,
"count": 223043,
"self": 214.17721338798742
}
}
}
}
},
"workers": {
"total": 2.826501406962052,
"count": 233000,
"self": 0.0,
"children": {
"worker_root": {
"total": 2443.8941267739915,
"count": 233000,
"is_parallel": true,
"self": 1152.1596722330853,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009669599999710954,
"count": 1,
"is_parallel": true,
"self": 0.00033978900000875,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006271709999623454,
"count": 2,
"is_parallel": true,
"self": 0.0006271709999623454
}
}
},
"UnityEnvironment.step": {
"total": 0.05082217099999298,
"count": 1,
"is_parallel": true,
"self": 0.00030833400001029077,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022362599997904908,
"count": 1,
"is_parallel": true,
"self": 0.00022362599997904908
},
"communicator.exchange": {
"total": 0.049549022999997305,
"count": 1,
"is_parallel": true,
"self": 0.049549022999997305
},
"steps_from_proto": {
"total": 0.000741188000006332,
"count": 1,
"is_parallel": true,
"self": 0.00023918800002320495,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005019999999831271,
"count": 2,
"is_parallel": true,
"self": 0.0005019999999831271
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1291.7344545409062,
"count": 232999,
"is_parallel": true,
"self": 38.83175269781873,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.42324352493671,
"count": 232999,
"is_parallel": true,
"self": 81.42324352493671
},
"communicator.exchange": {
"total": 1078.96576714906,
"count": 232999,
"is_parallel": true,
"self": 1078.96576714906
},
"steps_from_proto": {
"total": 92.51369116909075,
"count": 232999,
"is_parallel": true,
"self": 37.21313517305509,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.300555996035655,
"count": 465998,
"is_parallel": true,
"self": 55.300555996035655
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 542.065925910078,
"count": 233000,
"self": 6.347754026025882,
"children": {
"process_trajectory": {
"total": 171.2920907820511,
"count": 233000,
"self": 170.04672906705116,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2453617149999445,
"count": 10,
"self": 1.2453617149999445
}
}
},
"_update_policy": {
"total": 364.42608110200104,
"count": 97,
"self": 305.4959941900055,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.930086911995545,
"count": 2910,
"self": 58.930086911995545
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.980000529845711e-07,
"count": 1,
"self": 8.980000529845711e-07
},
"TrainerController._save_models": {
"total": 0.11205850700025621,
"count": 1,
"self": 0.0020696910005426616,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10998881599971355,
"count": 1,
"self": 0.10998881599971355
}
}
}
}
}
}
}
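
A minimal sketch of how this file can be inspected, assuming it is read from run_logs/timers.json as written by mlagents-learn. The field layout is taken from the JSON above: gauges carry value/min/max/count, and each timer node carries total/count/self and an optional children map.

```python
import json

# Assumption: the file lives at run_logs/timers.json relative to the working
# directory; adjust the path for your checkout.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Gauges: scalar training metrics, each tracked as value/min/max/count.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"min={gauge['min']:.4f} max={gauge['max']:.4f} count={gauge['count']}")

# Timer tree: each node reports total wall-clock seconds, call count, and the
# time spent in the node itself (excluding its children).
def walk(node, name="root", depth=0):
    indent = "  " * depth
    print(f"{indent}{name}: total={node['total']:.2f}s "
          f"count={node['count']} self={node['self']:.2f}s")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)
```

The tree walk reproduces the hierarchy above (TrainerController.start_learning, env_step, workers, trainer_advance, and so on), which makes it easy to see that communicator.exchange dominates the environment-stepping time in this run.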