ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4050235748291016,
"min": 1.4050235748291016,
"max": 1.4302114248275757,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70400.109375,
"min": 67793.3125,
"max": 77835.765625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 101.60975609756098,
"min": 98.78043912175649,
"max": 407.41129032258067,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49992.0,
"min": 48918.0,
"max": 50519.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999832.0,
"min": 49916.0,
"max": 1999832.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999832.0,
"min": 49916.0,
"max": 1999832.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3888235092163086,
"min": 0.07231215387582779,
"max": 2.433319330215454,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1175.3011474609375,
"min": 8.894394874572754,
"max": 1182.61767578125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5928084528058526,
"min": 1.8298694610111113,
"max": 3.7975245890008855,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1767.6617587804794,
"min": 225.07394370436668,
"max": 1850.9658942818642,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5928084528058526,
"min": 1.8298694610111113,
"max": 3.7975245890008855,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1767.6617587804794,
"min": 225.07394370436668,
"max": 1850.9658942818642,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015365761225176458,
"min": 0.013620674798989462,
"max": 0.01950244252681538,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04609728367552937,
"min": 0.02854787684288264,
"max": 0.05757323583529797,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05312614333298471,
"min": 0.022840914937357105,
"max": 0.06506626984725396,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15937842999895413,
"min": 0.04568182987471421,
"max": 0.1922785542905331,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.180148939983334e-06,
"min": 3.180148939983334e-06,
"max": 0.00029529292656902496,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.540446819950002e-06,
"min": 9.540446819950002e-06,
"max": 0.0008440536186488,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10106001666666664,
"min": 0.10106001666666664,
"max": 0.198430975,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3031800499999999,
"min": 0.20728579999999996,
"max": 0.5813512,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.28948316666667e-05,
"min": 6.28948316666667e-05,
"max": 0.0049217056525,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018868449500000008,
"min": 0.00018868449500000008,
"max": 0.014069424880000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1731241387",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1731243990"
},
"total": 2602.819777113,
"count": 1,
"self": 0.47631533200001286,
"children": {
"run_training.setup": {
"total": 0.07400716999995893,
"count": 1,
"self": 0.07400716999995893
},
"TrainerController.start_learning": {
"total": 2602.2694546109997,
"count": 1,
"self": 4.828500283992071,
"children": {
"TrainerController._reset_env": {
"total": 5.168301605000124,
"count": 1,
"self": 5.168301605000124
},
"TrainerController.advance": {
"total": 2592.1557530120076,
"count": 231002,
"self": 4.85014278699191,
"children": {
"env_step": {
"total": 2037.0381157140052,
"count": 231002,
"self": 1611.248492879052,
"children": {
"SubprocessEnvManager._take_step": {
"total": 422.70121745401707,
"count": 231002,
"self": 16.675836991015558,
"children": {
"TorchPolicy.evaluate": {
"total": 406.0253804630015,
"count": 222910,
"self": 406.0253804630015
}
}
},
"workers": {
"total": 3.0884053809361376,
"count": 231002,
"self": 0.0,
"children": {
"worker_root": {
"total": 2594.683810057101,
"count": 231002,
"is_parallel": true,
"self": 1279.644614961024,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010129730001153803,
"count": 1,
"is_parallel": true,
"self": 0.00030114900005173695,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007118240000636433,
"count": 2,
"is_parallel": true,
"self": 0.0007118240000636433
}
}
},
"UnityEnvironment.step": {
"total": 0.02936152199981734,
"count": 1,
"is_parallel": true,
"self": 0.00039552899988848367,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020261100007701316,
"count": 1,
"is_parallel": true,
"self": 0.00020261100007701316
},
"communicator.exchange": {
"total": 0.02797852999992756,
"count": 1,
"is_parallel": true,
"self": 0.02797852999992756
},
"steps_from_proto": {
"total": 0.0007848519999242853,
"count": 1,
"is_parallel": true,
"self": 0.00023028799978419556,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005545640001400898,
"count": 2,
"is_parallel": true,
"self": 0.0005545640001400898
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1315.0391950960768,
"count": 231001,
"is_parallel": true,
"self": 40.457859217124906,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.16721845703546,
"count": 231001,
"is_parallel": true,
"self": 84.16721845703546
},
"communicator.exchange": {
"total": 1097.352850947956,
"count": 231001,
"is_parallel": true,
"self": 1097.352850947956
},
"steps_from_proto": {
"total": 93.06126647396036,
"count": 231001,
"is_parallel": true,
"self": 32.98819011703222,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.07307635692814,
"count": 462002,
"is_parallel": true,
"self": 60.07307635692814
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 550.2674945110105,
"count": 231002,
"self": 7.054780512922662,
"children": {
"process_trajectory": {
"total": 167.15922600008594,
"count": 231002,
"self": 165.85004836208623,
"children": {
"RLTrainer._checkpoint": {
"total": 1.30917763799971,
"count": 10,
"self": 1.30917763799971
}
}
},
"_update_policy": {
"total": 376.05348799800186,
"count": 97,
"self": 304.7169574499974,
"children": {
"TorchPPOOptimizer.update": {
"total": 71.33653054800448,
"count": 2910,
"self": 71.33653054800448
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.259997568733525e-07,
"count": 1,
"self": 9.259997568733525e-07
},
"TrainerController._save_models": {
"total": 0.11689878400011366,
"count": 1,
"self": 0.0021771669998997822,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11472161700021388,
"count": 1,
"self": 0.11472161700021388
}
}
}
}
}
}
}