{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4030468463897705, "min": 1.4030468463897705, "max": 1.425045132637024, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 69661.2734375, "min": 69159.875, "max": 76323.6953125, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 80.03764320785598, "min": 75.61656441717791, "max": 374.82835820895525, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 48903.0, "min": 48719.0, "max": 50227.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999963.0, "min": 49611.0, "max": 1999963.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999963.0, "min": 49611.0, "max": 1999963.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.435166358947754, "min": 0.07531054317951202, "max": 2.508405923843384, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1487.8865966796875, "min": 10.016302108764648, "max": 1538.64208984375, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.755780282465409, "min": 1.8995083709408467, "max": 3.9711147397240967, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2294.7817525863647, "min": 252.6346133351326, "max": 2435.8218958973885, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.755780282465409, "min": 1.8995083709408467, "max": 3.9711147397240967, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2294.7817525863647, "min": 252.6346133351326, "max": 2435.8218958973885, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.016938703165457508, "min": 0.013770369880270056, "max": 0.019945037739429003, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.050816109496372525, "min": 0.02754073976054011, "max": 0.05972981700130428, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.05444325018260213, "min": 0.02243803832679987, "max": 0.0614647600799799, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.1633297505478064, "min": 0.04487607665359974, "max": 0.16761799864470958, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.4568988477333375e-06, "min": 3.4568988477333375e-06, "max": 0.00029536080154639994, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.0370696543200013e-05, "min": 1.0370696543200013e-05, "max": 0.0008441184186271999, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.1011522666666667, "min": 0.1011522666666667, "max": 0.1984536, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.3034568000000001, "min": 0.20754625, "max": 0.5813728, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.749810666666675e-05, "min": 6.749810666666675e-05, "max": 0.00492283464, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00020249432000000024, "min": 0.00020249432000000024, "max": 0.014070502720000001, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1689937536", "python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1689940056" }, "total": 
2519.589501247, "count": 1, "self": 0.7832221630001186, "children": { "run_training.setup": { "total": 0.03489442800002962, "count": 1, "self": 0.03489442800002962 }, "TrainerController.start_learning": { "total": 2518.771384656, "count": 1, "self": 4.849106085965559, "children": { "TrainerController._reset_env": { "total": 5.888467373000026, "count": 1, "self": 5.888467373000026 }, "TrainerController.advance": { "total": 2507.836061600035, "count": 232940, "self": 4.638577371055817, "children": { "env_step": { "total": 1943.2063936049378, "count": 232940, "self": 1641.5662512828944, "children": { "SubprocessEnvManager._take_step": { "total": 298.5725179000168, "count": 232940, "self": 17.27195515598555, "children": { "TorchPolicy.evaluate": { "total": 281.30056274403125, "count": 222900, "self": 281.30056274403125 } } }, "workers": { "total": 3.067624422026711, "count": 232940, "self": 0.0, "children": { "worker_root": { "total": 2510.6001181039824, "count": 232940, "is_parallel": true, "self": 1171.00949380799, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0008716070000218679, "count": 1, "is_parallel": true, "self": 0.0002760920000355327, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005955149999863352, "count": 2, "is_parallel": true, "self": 0.0005955149999863352 } } }, "UnityEnvironment.step": { "total": 0.03126937599995472, "count": 1, "is_parallel": true, "self": 0.00037978900002144655, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00022211199996036157, "count": 1, "is_parallel": true, "self": 0.00022211199996036157 }, "communicator.exchange": { "total": 0.029841120000014598, "count": 1, "is_parallel": true, "self": 0.029841120000014598 }, "steps_from_proto": { "total": 0.000826354999958312, "count": 1, "is_parallel": true, "self": 0.00022125699990738212, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006050980000509298, "count": 2, "is_parallel": true, "self": 0.0006050980000509298 } } } } } } }, "UnityEnvironment.step": { "total": 1339.5906242959923, "count": 232939, "is_parallel": true, "self": 41.25404065500152, "children": { "UnityEnvironment._generate_step_input": { "total": 80.78105103198948, "count": 232939, "is_parallel": true, "self": 80.78105103198948 }, "communicator.exchange": { "total": 1119.3202434590335, "count": 232939, "is_parallel": true, "self": 1119.3202434590335 }, "steps_from_proto": { "total": 98.23528914996757, "count": 232939, "is_parallel": true, "self": 35.028356233020986, "children": { "_process_rank_one_or_two_observation": { "total": 63.206932916946585, "count": 465878, "is_parallel": true, "self": 63.206932916946585 } } } } } } } } } } }, "trainer_advance": { "total": 559.9910906240416, "count": 232940, "self": 6.947129264086016, "children": { "process_trajectory": { "total": 144.63987947295539, "count": 232940, "self": 143.2919785649549, "children": { "RLTrainer._checkpoint": { "total": 1.3479009080004971, "count": 10, "self": 1.3479009080004971 } } }, "_update_policy": { "total": 408.4040818870002, "count": 97, "self": 347.7526267869955, "children": { "TorchPPOOptimizer.update": { "total": 60.65145510000468, "count": 2910, "self": 60.65145510000468 } } } } } } }, "trainer_threads": { "total": 1.4059996829018928e-06, "count": 1, "self": 1.4059996829018928e-06 }, "TrainerController._save_models": { "total": 0.19774819099984597, "count": 1, "self": 0.0026603749997775594, "children": { 
"RLTrainer._checkpoint": { "total": 0.19508781600006841, "count": 1, "self": 0.19508781600006841 } } } } } } }