ppo-Huggy / run_logs / timers.json
{
    "name": "root",
    "gauges": {
        "Huggy.Policy.Entropy.mean": {
            "value": 1.4073259830474854,
            "min": 1.4073259830474854,
            "max": 1.4273267984390259,
            "count": 40
        },
        "Huggy.Policy.Entropy.sum": {
            "value": 72245.078125,
            "min": 69184.1640625,
            "max": 76074.109375,
            "count": 40
        },
        "Huggy.Environment.EpisodeLength.mean": {
            "value": 80.19318181818181,
            "min": 70.46,
            "max": 377.56390977443607,
            "count": 40
        },
        "Huggy.Environment.EpisodeLength.sum": {
            "value": 49399.0,
            "min": 49044.0,
            "max": 50216.0,
            "count": 40
        },
        "Huggy.Step.mean": {
            "value": 1999934.0,
            "min": 49733.0,
            "max": 1999934.0,
            "count": 40
        },
        "Huggy.Step.sum": {
            "value": 1999934.0,
            "min": 49733.0,
            "max": 1999934.0,
            "count": 40
        },
        "Huggy.Policy.ExtrinsicValueEstimate.mean": {
            "value": 2.520026445388794,
            "min": 0.10002531856298447,
            "max": 2.542757749557495,
            "count": 40
        },
        "Huggy.Policy.ExtrinsicValueEstimate.sum": {
            "value": 1552.3363037109375,
            "min": 13.20334243774414,
            "max": 1779.930419921875,
            "count": 40
        },
        "Huggy.Environment.CumulativeReward.mean": {
            "value": 3.96726676366933,
            "min": 1.8684599816121839,
            "max": 4.113958208619615,
            "count": 40
        },
        "Huggy.Environment.CumulativeReward.sum": {
            "value": 2443.836326420307,
            "min": 246.63671757280827,
            "max": 2731.7513710856438,
            "count": 40
        },
        "Huggy.Policy.ExtrinsicReward.mean": {
            "value": 3.96726676366933,
            "min": 1.8684599816121839,
            "max": 4.113958208619615,
            "count": 40
        },
        "Huggy.Policy.ExtrinsicReward.sum": {
            "value": 2443.836326420307,
            "min": 246.63671757280827,
            "max": 2731.7513710856438,
            "count": 40
        },
        "Huggy.Losses.PolicyLoss.mean": {
            "value": 0.018714974201025646,
            "min": 0.012646318567810037,
            "max": 0.021522284468907554,
            "count": 40
        },
        "Huggy.Losses.PolicyLoss.sum": {
            "value": 0.056144922603076935,
            "min": 0.025292637135620074,
            "max": 0.05640557738212616,
            "count": 40
        },
        "Huggy.Losses.ValueLoss.mean": {
            "value": 0.05210017433596983,
            "min": 0.02188228784749905,
            "max": 0.06209501804163059,
            "count": 40
        },
        "Huggy.Losses.ValueLoss.sum": {
            "value": 0.15630052300790948,
            "min": 0.0437645756949981,
            "max": 0.1765675405661265,
            "count": 40
        },
        "Huggy.Policy.LearningRate.mean": {
            "value": 3.909698696800002e-06,
            "min": 3.909698696800002e-06,
            "max": 0.0002953064265645249,
            "count": 40
        },
        "Huggy.Policy.LearningRate.sum": {
            "value": 1.1729096090400006e-05,
            "min": 1.1729096090400006e-05,
            "max": 0.0008439342186886001,
            "count": 40
        },
        "Huggy.Policy.Epsilon.mean": {
            "value": 0.1013032,
            "min": 0.1013032,
            "max": 0.19843547500000003,
            "count": 40
        },
        "Huggy.Policy.Epsilon.sum": {
            "value": 0.3039096,
            "min": 0.20774884999999998,
            "max": 0.5813113999999999,
            "count": 40
        },
        "Huggy.Policy.Beta.mean": {
            "value": 7.502968000000004e-05,
            "min": 7.502968000000004e-05,
            "max": 0.004921930202499999,
            "count": 40
        },
        "Huggy.Policy.Beta.sum": {
            "value": 0.0002250890400000001,
            "min": 0.0002250890400000001,
            "max": 0.01406743886,
            "count": 40
        },
        "Huggy.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 40
        },
        "Huggy.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 40
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1702992525",
        "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
        "mlagents_version": "1.1.0.dev0",
        "mlagents_envs_version": "1.1.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "2.1.2+cu121",
        "numpy_version": "1.23.5",
        "end_time_seconds": "1702995125"
    },
    "total": 2600.150216784,
    "count": 1,
    "self": 0.5555386259998158,
    "children": {
        "run_training.setup": {
            "total": 0.05078170800015869,
            "count": 1,
            "self": 0.05078170800015869
        },
        "TrainerController.start_learning": {
            "total": 2599.54389645,
            "count": 1,
            "self": 4.883474503996695,
            "children": {
                "TrainerController._reset_env": {
                    "total": 3.325823573999969,
                    "count": 1,
                    "self": 3.325823573999969
                },
                "TrainerController.advance": {
                    "total": 2591.2163347570036,
                    "count": 233741,
                    "self": 5.192997166916484,
                    "children": {
                        "env_step": {
                            "total": 2036.4192825591656,
                            "count": 233741,
                            "self": 1693.4283782652094,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 339.8819713970465,
                                    "count": 233741,
                                    "self": 18.54272205007146,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 321.33924934697507,
                                            "count": 223053,
                                            "self": 321.33924934697507
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 3.108932896909664,
                                    "count": 233741,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 2591.8339128920597,
                                            "count": 233741,
                                            "is_parallel": true,
                                            "self": 1219.0671845870731,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.0006844160000127886,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.00022426200007430452,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.00046015399993848405,
                                                                    "count": 2,
                                                                    "is_parallel": true,
                                                                    "self": 0.00046015399993848405
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.03522680599985506,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.000328566999769464,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0002169790000152716,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0002169790000152716
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.03391891000001124,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.03391891000001124
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0007623500000590866,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0002079639998555649,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0005543860002035217,
                                                                            "count": 2,
                                                                            "is_parallel": true,
                                                                            "self": 0.0005543860002035217
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 1372.7667283049866,
                                                    "count": 233740,
                                                    "is_parallel": true,
                                                    "self": 42.60082515597924,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 89.48527045395076,
                                                            "count": 233740,
                                                            "is_parallel": true,
                                                            "self": 89.48527045395076
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 1144.225593093045,
                                                            "count": 233740,
                                                            "is_parallel": true,
                                                            "self": 1144.225593093045
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 96.45503960201154,
                                                            "count": 233740,
                                                            "is_parallel": true,
                                                            "self": 35.85881824902822,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 60.59622135298332,
                                                                    "count": 467480,
                                                                    "is_parallel": true,
                                                                    "self": 60.59622135298332
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        },
                        "trainer_advance": {
                            "total": 549.6040550309215,
                            "count": 233741,
                            "self": 7.066664288956645,
                            "children": {
                                "process_trajectory": {
                                    "total": 167.9927145839638,
                                    "count": 233741,
                                    "self": 166.76905197596375,
                                    "children": {
                                        "RLTrainer._checkpoint": {
                                            "total": 1.2236626080000406,
                                            "count": 10,
                                            "self": 1.2236626080000406
                                        }
                                    }
                                },
                                "_update_policy": {
                                    "total": 374.54467615800104,
                                    "count": 97,
                                    "self": 307.74507431101097,
                                    "children": {
                                        "TorchPPOOptimizer.update": {
                                            "total": 66.79960184699007,
                                            "count": 2910,
                                            "self": 66.79960184699007
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 9.929999578162096e-07,
                    "count": 1,
                    "self": 9.929999578162096e-07
                },
                "TrainerController._save_models": {
                    "total": 0.11826262199974735,
                    "count": 1,
                    "self": 0.0019531179996192805,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.11630950400012807,
                            "count": 1,
                            "self": 0.11630950400012807
                        }
                    }
                }
            }
        }
    }
}
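
For reference, a minimal sketch (using only the Python standard library) of how one might load this timers.json and summarize it: it prints the per-metric gauges and then walks the nested timer tree to show where the roughly 2600 s of wall-clock time went. The relative file path is an assumption; point it at wherever this file lives locally.

import json

# Load the timing log written by ML-Agents at the end of the run
# (path assumed; adjust as needed).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# "gauges" holds value/min/max per metric, aggregated over the run
# (count = 40 summary periods in this file).
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} min={gauge['min']:.4f} max={gauge['max']:.4f}")

# The rest of the file is a nested timer tree (total/self seconds, call count,
# children); walk it recursively to see the time breakdown.
def print_timer(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: total={node.get('total', 0.0):.1f}s "
          f"self={node.get('self', 0.0):.1f}s count={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        print_timer(child, child_name, depth + 1)

print_timer(timers)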