ppo-Huggy-v1 / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.410075068473816,
"min": 1.4100725650787354,
"max": 1.4297492504119873,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70289.421875,
"min": 69289.578125,
"max": 78388.1953125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 101.3780487804878,
"min": 77.03276131045241,
"max": 398.8888888888889,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49878.0,
"min": 48829.0,
"max": 50260.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999639.0,
"min": 49684.0,
"max": 1999639.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999639.0,
"min": 49684.0,
"max": 1999639.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4073874950408936,
"min": 0.14734166860580444,
"max": 2.539478063583374,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1184.4346923828125,
"min": 18.417709350585938,
"max": 1583.3310546875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.598558371023434,
"min": 1.7932704255580902,
"max": 3.972337141859955,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1770.4907185435295,
"min": 224.15880319476128,
"max": 2512.707470715046,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.598558371023434,
"min": 1.7932704255580902,
"max": 3.972337141859955,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1770.4907185435295,
"min": 224.15880319476128,
"max": 2512.707470715046,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017993331598376645,
"min": 0.011582269630162045,
"max": 0.020630620672212295,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05397999479512994,
"min": 0.02316453926032409,
"max": 0.056375787520664744,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04951859940257338,
"min": 0.021052605720857778,
"max": 0.05907105219860871,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14855579820772014,
"min": 0.042105211441715557,
"max": 0.17591376552979152,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.645548784850006e-06,
"min": 3.645548784850006e-06,
"max": 0.0002953736265421249,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0936646354550018e-05,
"min": 1.0936646354550018e-05,
"max": 0.0008440860186379997,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10121514999999999,
"min": 0.10121514999999999,
"max": 0.198457875,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30364545,
"min": 0.20759595000000008,
"max": 0.581362,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.06359850000001e-05,
"min": 7.06359850000001e-05,
"max": 0.0049230479625,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002119079550000003,
"min": 0.0002119079550000003,
"max": 0.0140699638,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682316070",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682318379"
},
"total": 2309.140614547,
"count": 1,
"self": 0.39706639900032314,
"children": {
"run_training.setup": {
"total": 0.1154004609999788,
"count": 1,
"self": 0.1154004609999788
},
"TrainerController.start_learning": {
"total": 2308.628147687,
"count": 1,
"self": 4.11459280196641,
"children": {
"TrainerController._reset_env": {
"total": 4.054176567000013,
"count": 1,
"self": 4.054176567000013
},
"TrainerController.advance": {
"total": 2300.3229674350337,
"count": 232729,
"self": 4.40944968910253,
"children": {
"env_step": {
"total": 1792.4215408390367,
"count": 232729,
"self": 1518.717399793973,
"children": {
"SubprocessEnvManager._take_step": {
"total": 270.9358641429821,
"count": 232729,
"self": 16.04746491486418,
"children": {
"TorchPolicy.evaluate": {
"total": 254.88839922811792,
"count": 223067,
"self": 254.88839922811792
}
}
},
"workers": {
"total": 2.7682769020815385,
"count": 232729,
"self": 0.0,
"children": {
"worker_root": {
"total": 2300.5710042620503,
"count": 232729,
"is_parallel": true,
"self": 1059.217792849075,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010756140000012238,
"count": 1,
"is_parallel": true,
"self": 0.00041386500004136906,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006617489999598547,
"count": 2,
"is_parallel": true,
"self": 0.0006617489999598547
}
}
},
"UnityEnvironment.step": {
"total": 0.04361102899997604,
"count": 1,
"is_parallel": true,
"self": 0.00034060600000884733,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017810299999609924,
"count": 1,
"is_parallel": true,
"self": 0.00017810299999609924
},
"communicator.exchange": {
"total": 0.042297468999947796,
"count": 1,
"is_parallel": true,
"self": 0.042297468999947796
},
"steps_from_proto": {
"total": 0.0007948510000233,
"count": 1,
"is_parallel": true,
"self": 0.00025131300003522483,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005435379999880752,
"count": 2,
"is_parallel": true,
"self": 0.0005435379999880752
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1241.3532114129753,
"count": 232728,
"is_parallel": true,
"self": 38.170573156935916,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.54436528903909,
"count": 232728,
"is_parallel": true,
"self": 79.54436528903909
},
"communicator.exchange": {
"total": 1035.3249783230754,
"count": 232728,
"is_parallel": true,
"self": 1035.3249783230754
},
"steps_from_proto": {
"total": 88.31329464392496,
"count": 232728,
"is_parallel": true,
"self": 33.169490798960055,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.14380384496491,
"count": 465456,
"is_parallel": true,
"self": 55.14380384496491
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 503.4919769068946,
"count": 232729,
"self": 6.481456897843543,
"children": {
"process_trajectory": {
"total": 130.3983854480511,
"count": 232729,
"self": 128.99800797505117,
"children": {
"RLTrainer._checkpoint": {
"total": 1.400377472999935,
"count": 10,
"self": 1.400377472999935
}
}
},
"_update_policy": {
"total": 366.61213456099995,
"count": 97,
"self": 307.81163898699754,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.80049557400241,
"count": 2910,
"self": 58.80049557400241
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0159997145819943e-06,
"count": 1,
"self": 1.0159997145819943e-06
},
"TrainerController._save_models": {
"total": 0.13640986700011126,
"count": 1,
"self": 0.0023475130001315847,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13406235399997968,
"count": 1,
"self": 0.13406235399997968
}
}
}
}
}
}
}
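
A minimal sketch (not part of the run logs above) of how a timers.json file with this layout could be inspected using only Python's standard json module. The relative path "run_logs/timers.json" is an assumption about where the file sits locally; the key names (gauges, total, count, self, children) are taken directly from the JSON shown above.

import json

# Load the timer/gauge dump produced during training.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Gauges: per-metric statistics, each with value/min/max/count fields.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# Timer tree: nested nodes carrying "total" seconds, a call "count",
# "self" time, and optional "children"; walk it depth-first.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.2f}s "
          f"over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)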