{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.399888277053833,
"min": 1.399888277053833,
"max": 1.4249364137649536,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70439.578125,
"min": 68025.015625,
"max": 78359.828125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 89.08648648648649,
"min": 77.79179810725552,
"max": 377.9398496240602,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49443.0,
"min": 49186.0,
"max": 50266.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999995.0,
"min": 49987.0,
"max": 1999995.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999995.0,
"min": 49987.0,
"max": 1999995.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4492268562316895,
"min": -0.03878455609083176,
"max": 2.50095272064209,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1359.3209228515625,
"min": -5.119561195373535,
"max": 1559.0411376953125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6885050519092664,
"min": 1.678141678034356,
"max": 3.990134831192424,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2047.1203038096428,
"min": 221.514701500535,
"max": 2429.2083029150963,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6885050519092664,
"min": 1.678141678034356,
"max": 3.990134831192424,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2047.1203038096428,
"min": 221.514701500535,
"max": 2429.2083029150963,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016264450806193053,
"min": 0.014489772990850724,
"max": 0.01853118933051721,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.048793352418579156,
"min": 0.028979545981701448,
"max": 0.05559356799155163,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05981135422156917,
"min": 0.024115596959988276,
"max": 0.06136278249323368,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17943406266470752,
"min": 0.04823119391997655,
"max": 0.17943406266470752,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.717248760950002e-06,
"min": 3.717248760950002e-06,
"max": 0.0002953394265535249,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1151746282850006e-05,
"min": 1.1151746282850006e-05,
"max": 0.0008439634686788499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10123905000000001,
"min": 0.10123905000000001,
"max": 0.19844647499999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30371715000000005,
"min": 0.20761515000000003,
"max": 0.5813211499999998,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.182859500000004e-05,
"min": 7.182859500000004e-05,
"max": 0.0049224791025,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021548578500000013,
"min": 0.00021548578500000013,
"max": 0.014067925385000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670772507",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=SweHuggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670774728"
},
"total": 2220.980603321,
"count": 1,
"self": 0.4019460409999738,
"children": {
"run_training.setup": {
"total": 0.10921331299999792,
"count": 1,
"self": 0.10921331299999792
},
"TrainerController.start_learning": {
"total": 2220.469443967,
"count": 1,
"self": 3.825907915027983,
"children": {
"TrainerController._reset_env": {
"total": 10.307026213999961,
"count": 1,
"self": 10.307026213999961
},
"TrainerController.advance": {
"total": 2206.227799740973,
"count": 232965,
"self": 4.215903987997535,
"children": {
"env_step": {
"total": 1733.091290446005,
"count": 232965,
"self": 1454.540573806047,
"children": {
"SubprocessEnvManager._take_step": {
"total": 275.9247874609812,
"count": 232965,
"self": 14.118936214034989,
"children": {
"TorchPolicy.evaluate": {
"total": 261.8058512469462,
"count": 223048,
"self": 66.35848251999442,
"children": {
"TorchPolicy.sample_actions": {
"total": 195.44736872695177,
"count": 223048,
"self": 195.44736872695177
}
}
}
}
},
"workers": {
"total": 2.625929178976776,
"count": 232965,
"self": 0.0,
"children": {
"worker_root": {
"total": 2212.4539407389984,
"count": 232965,
"is_parallel": true,
"self": 1018.6113009570088,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025853409999854193,
"count": 1,
"is_parallel": true,
"self": 0.00038607400000501,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021992669999804093,
"count": 2,
"is_parallel": true,
"self": 0.0021992669999804093
}
}
},
"UnityEnvironment.step": {
"total": 0.02780279699999255,
"count": 1,
"is_parallel": true,
"self": 0.00027512400004070514,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022170900001583504,
"count": 1,
"is_parallel": true,
"self": 0.00022170900001583504
},
"communicator.exchange": {
"total": 0.0265645309999627,
"count": 1,
"is_parallel": true,
"self": 0.0265645309999627
},
"steps_from_proto": {
"total": 0.0007414329999733127,
"count": 1,
"is_parallel": true,
"self": 0.0002761039999654713,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004653290000078414,
"count": 2,
"is_parallel": true,
"self": 0.0004653290000078414
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1193.8426397819896,
"count": 232964,
"is_parallel": true,
"self": 34.988119229893755,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.8569980800674,
"count": 232964,
"is_parallel": true,
"self": 79.8569980800674
},
"communicator.exchange": {
"total": 983.3842635060485,
"count": 232964,
"is_parallel": true,
"self": 983.3842635060485
},
"steps_from_proto": {
"total": 95.61325896598015,
"count": 232964,
"is_parallel": true,
"self": 41.543777267871974,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.06948169810818,
"count": 465928,
"is_parallel": true,
"self": 54.06948169810818
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 468.9206053069705,
"count": 232965,
"self": 5.934655194934692,
"children": {
"process_trajectory": {
"total": 152.4199962580371,
"count": 232965,
"self": 151.95543200903734,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4645642489997499,
"count": 4,
"self": 0.4645642489997499
}
}
},
"_update_policy": {
"total": 310.56595385399874,
"count": 97,
"self": 258.6435466170029,
"children": {
"TorchPPOOptimizer.update": {
"total": 51.92240723699581,
"count": 2910,
"self": 51.92240723699581
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.849997357174288e-07,
"count": 1,
"self": 8.849997357174288e-07
},
"TrainerController._save_models": {
"total": 0.10870921199966688,
"count": 1,
"self": 0.001905475000057777,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1068037369996091,
"count": 1,
"self": 0.1068037369996091
}
}
}
}
}
}
}