{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4028151035308838, "min": 1.4028151035308838, "max": 1.4288065433502197, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 71218.1171875, "min": 69060.796875, "max": 77591.2265625, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 75.66257668711657, "min": 67.70027624309392, "max": 386.0615384615385, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49332.0, "min": 49015.0, "max": 50188.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999879.0, "min": 49568.0, "max": 1999879.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999879.0, "min": 49568.0, "max": 1999879.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.487884044647217, "min": 0.09975283592939377, "max": 2.53963041305542, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1622.1004638671875, "min": 12.868115425109863, "max": 1801.9638671875, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.7573663654502916, "min": 1.862935187396153, "max": 4.105184104697795, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2449.80287027359, "min": 240.31863917410374, "max": 2817.628917992115, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.7573663654502916, "min": 1.862935187396153, "max": 4.105184104697795, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2449.80287027359, "min": 240.31863917410374, "max": 2817.628917992115, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.016453603361878778, "min": 0.012736963199859019, "max": 0.020758286939235406, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.049360810085636336, "min": 0.025473926399718037, "max": 0.057723179733390376, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.0635308405591382, "min": 0.02178633517275254, "max": 0.0635308405591382, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.19059252167741458, "min": 0.04357267034550508, "max": 0.19059252167741458, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.855348714916664e-06, "min": 3.855348714916664e-06, "max": 0.0002953488765503749, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.1566046144749992e-05, "min": 1.1566046144749992e-05, "max": 0.0008439088686970499, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10128508333333332, "min": 0.10128508333333332, "max": 0.19844962499999996, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30385524999999997, "min": 0.20772489999999996, "max": 0.5813029500000001, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 7.412565833333332e-05, "min": 7.412565833333332e-05, "max": 0.004922636287499999, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00022237697499999997, "min": 0.00022237697499999997, "max": 0.014067017205, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1673107094", "python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.29.0.dev0", "mlagents_envs_version": "0.29.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.8.1+cu102", "numpy_version": "1.21.6", "end_time_seconds": 
"1673109261" }, "total": 2166.665210557, "count": 1, "self": 0.3908150500001284, "children": { "run_training.setup": { "total": 0.10369133599999714, "count": 1, "self": 0.10369133599999714 }, "TrainerController.start_learning": { "total": 2166.170704171, "count": 1, "self": 3.652257801029009, "children": { "TrainerController._reset_env": { "total": 7.95051317299999, "count": 1, "self": 7.95051317299999 }, "TrainerController.advance": { "total": 2154.4485887409714, "count": 233957, "self": 3.9054755749971264, "children": { "env_step": { "total": 1680.0782644619558, "count": 233957, "self": 1413.1094284969824, "children": { "SubprocessEnvManager._take_step": { "total": 264.42106964102754, "count": 233957, "self": 13.947094889975233, "children": { "TorchPolicy.evaluate": { "total": 250.4739747510523, "count": 223030, "self": 63.49317543903635, "children": { "TorchPolicy.sample_actions": { "total": 186.98079931201596, "count": 223030, "self": 186.98079931201596 } } } } }, "workers": { "total": 2.5477663239457797, "count": 233957, "self": 0.0, "children": { "worker_root": { "total": 2158.669352113948, "count": 233957, "is_parallel": true, "self": 993.4299178949343, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0020922900000641675, "count": 1, "is_parallel": true, "self": 0.00028018800003337674, "children": { "_process_rank_one_or_two_observation": { "total": 0.0018121020000307908, "count": 2, "is_parallel": true, "self": 0.0018121020000307908 } } }, "UnityEnvironment.step": { "total": 0.02613750299997264, "count": 1, "is_parallel": true, "self": 0.000263822000079017, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00019572099995457393, "count": 1, "is_parallel": true, "self": 0.00019572099995457393 }, "communicator.exchange": { "total": 0.0249469489999683, "count": 1, "is_parallel": true, "self": 0.0249469489999683 }, "steps_from_proto": { "total": 0.0007310109999707493, "count": 1, "is_parallel": true, "self": 0.0002396639999915351, "children": { "_process_rank_one_or_two_observation": { "total": 0.0004913469999792142, "count": 2, "is_parallel": true, "self": 0.0004913469999792142 } } } } } } }, "UnityEnvironment.step": { "total": 1165.239434219014, "count": 233956, "is_parallel": true, "self": 33.46578382798043, "children": { "UnityEnvironment._generate_step_input": { "total": 73.88071375097843, "count": 233956, "is_parallel": true, "self": 73.88071375097843 }, "communicator.exchange": { "total": 966.6561637439845, "count": 233956, "is_parallel": true, "self": 966.6561637439845 }, "steps_from_proto": { "total": 91.23677289607053, "count": 233956, "is_parallel": true, "self": 37.45640671298463, "children": { "_process_rank_one_or_two_observation": { "total": 53.7803661830859, "count": 467912, "is_parallel": true, "self": 53.7803661830859 } } } } } } } } } } }, "trainer_advance": { "total": 470.4648487040188, "count": 233957, "self": 5.939337190998003, "children": { "process_trajectory": { "total": 151.78811472402128, "count": 233957, "self": 150.5956008040215, "children": { "RLTrainer._checkpoint": { "total": 1.192513919999783, "count": 10, "self": 1.192513919999783 } } }, "_update_policy": { "total": 312.7373967889995, "count": 97, "self": 260.10850436100407, "children": { "TorchPPOOptimizer.update": { "total": 52.628892427995424, "count": 2910, "self": 52.628892427995424 } } } } } } }, "trainer_threads": { "total": 9.589998626324814e-07, "count": 1, "self": 
9.589998626324814e-07 }, "TrainerController._save_models": { "total": 0.11934349700004532, "count": 1, "self": 0.001956787999915832, "children": { "RLTrainer._checkpoint": { "total": 0.11738670900012949, "count": 1, "self": 0.11738670900012949 } } } } } } }